Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/acpi_lpit.c | 2
-rw-r--r--  drivers/acpi/acpi_processor.c | 182
-rw-r--r--  drivers/acpi/acpi_video.c | 2
-rw-r--r--  drivers/acpi/acpica/acapps.h | 4
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/acconvert.h | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 2
-rw-r--r--  drivers/acpi/acpica/acevents.h | 2
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acobject.h | 5
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 2
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 4
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 4
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload.c | 23
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/dswscope.c | 2
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evxface.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 2
-rw-r--r--  drivers/acpi/acpica/exconcat.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 2
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 2
-rw-r--r--  drivers/acpi/acpica/exfield.c | 12
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 2
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exprep.c | 2
-rw-r--r--  drivers/acpi/acpica/exregion.c | 2
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 2
-rw-r--r--  drivers/acpi/acpica/exserial.c | 2
-rw-r--r--  drivers/acpi/acpica/exstore.c | 2
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 2
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 2
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 2
-rw-r--r--  drivers/acpi/acpica/extrace.c | 2
-rw-r--r--  drivers/acpi/acpica/exutils.c | 2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 2
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psloop.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 2
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/psparse.c | 2
-rw-r--r--  drivers/acpi/acpica/psscope.c | 2
-rw-r--r--  drivers/acpi/acpica/pstree.c | 2
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/pswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 2
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 2
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/utascii.c | 2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r--  drivers/acpi/acpica/utcache.c | 2
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 2
-rw-r--r--  drivers/acpi/acpica/uteval.c | 2
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 2
-rw-r--r--  drivers/acpi/acpica/utids.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 2
-rw-r--r--  drivers/acpi/acpica/utlock.c | 2
-rw-r--r--  drivers/acpi/acpica/utobject.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 2
-rw-r--r--  drivers/acpi/acpica/utprint.c | 2
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 2
-rw-r--r--  drivers/acpi/arm64/iort.c | 57
-rw-r--r--  drivers/acpi/battery.c | 75
-rw-r--r--  drivers/acpi/button.c | 11
-rw-r--r--  drivers/acpi/device_pm.c | 1
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 1
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 4
-rw-r--r--  drivers/acpi/ec.c | 16
-rw-r--r--  drivers/acpi/fan.c | 97
-rw-r--r--  drivers/acpi/pptt.c | 29
-rw-r--r--  drivers/acpi/processor_idle.c | 174
-rw-r--r--  drivers/acpi/sleep.c | 3
-rw-r--r--  drivers/acpi/video_detect.c | 29
-rw-r--r--  drivers/ata/acard-ahci.c | 4
-rw-r--r--  drivers/ata/ahci_brcm.c | 70
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 2
-rw-r--r--  drivers/ata/pata_macio.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 6
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 2
-rw-r--r--  drivers/atm/eni.c | 8
-rw-r--r--  drivers/atm/firestream.c | 3
-rw-r--r--  drivers/atm/fore200e.c | 25
-rw-r--r--  drivers/base/firmware_loader/builtin/Makefile | 2
-rw-r--r--  drivers/base/power/runtime.c | 13
-rw-r--r--  drivers/base/power/wakeup.c | 3
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 10
-rw-r--r--  drivers/base/regmap/regmap.c | 17
-rw-r--r--  drivers/base/swnode.c | 154
-rw-r--r--  drivers/base/test/Kconfig | 3
-rw-r--r--  drivers/base/test/Makefile | 2
-rw-r--r--  drivers/base/test/property-entry-test.c | 475
-rw-r--r--  drivers/bcma/driver_chipcommon_b.c | 2
-rw-r--r--  drivers/bcma/driver_pci_host.c | 6
-rw-r--r--  drivers/bcma/host_soc.c | 2
-rw-r--r--  drivers/bcma/scan.c | 13
-rw-r--r--  drivers/block/null_blk_zoned.c | 4
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.c | 48
-rw-r--r--  drivers/bluetooth/btbcm.h | 16
-rw-r--r--  drivers/bluetooth/btrtl.c | 20
-rw-r--r--  drivers/bluetooth/btsdio.c | 19
-rw-r--r--  drivers/bluetooth/btusb.c | 13
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 73
-rw-r--r--  drivers/bluetooth/hci_h4.c | 1
-rw-r--r--  drivers/bluetooth/hci_h5.c | 3
-rw-r--r--  drivers/bluetooth/hci_qca.c | 418
-rw-r--r--  drivers/bluetooth/hci_uart.h | 7
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 1
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 4
-rw-r--r--  drivers/bus/ti-sysc.c | 10
-rw-r--r--  drivers/char/agp/generic.c | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 2
-rw-r--r--  drivers/char/applicom.c | 4
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 2
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 1
-rw-r--r--  drivers/char/hw_random/octeon-rng.c | 4
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 2
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 34
-rw-r--r--  drivers/clk/clk.c | 10
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 7
-rw-r--r--  drivers/clk/renesas/clk-rz.c | 4
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 8
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 16
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r.c | 21
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.h | 2
-rw-r--r--  drivers/clk/tegra/clk.c | 4
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 1
-rw-r--r--  drivers/clocksource/Kconfig | 76
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 5
-rw-r--r--  drivers/clocksource/em_sti.c | 7
-rw-r--r--  drivers/clocksource/exynos_mct.c | 2
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 84
-rw-r--r--  drivers/clocksource/sh_cmt.c | 2
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 2
-rw-r--r--  drivers/clocksource/sh_tmu.c | 2
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c | 26
-rw-r--r--  drivers/clocksource/timer-microchip-pit64b.c | 451
-rw-r--r--  drivers/clocksource/timer-ti-dm.c | 20
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 4
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 12
-rw-r--r--  drivers/cpuidle/coupled.c | 9
-rw-r--r--  drivers/cpuidle/cpuidle-clps711x.c | 5
-rw-r--r--  drivers/cpuidle/cpuidle-kirkwood.c | 5
-rw-r--r--  drivers/cpuidle/cpuidle.c | 9
-rw-r--r--  drivers/cpuidle/driver.c | 46
-rw-r--r--  drivers/cpuidle/governors/teo.c | 2
-rw-r--r--  drivers/cpuidle/sysfs.c | 16
-rw-r--r--  drivers/crypto/Kconfig | 89
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 24
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 5
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 31
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 37
-rw-r--r--  drivers/crypto/amlogic/Kconfig | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 4
-rw-r--r--  drivers/crypto/atmel-aes.c | 359
-rw-r--r--  drivers/crypto/atmel-authenc.h | 3
-rw-r--r--  drivers/crypto/atmel-sha.c | 473
-rw-r--r--  drivers/crypto/atmel-tdes.c | 375
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 10
-rw-r--r--  drivers/crypto/bcm/cipher.c | 17
-rw-r--r--  drivers/crypto/caam/Kconfig | 14
-rw-r--r--  drivers/crypto/caam/caamalg.c | 33
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 44
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 206
-rw-r--r--  drivers/crypto/caam/caamhash.c | 167
-rw-r--r--  drivers/crypto/caam/ctrl.c | 15
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 12
-rw-r--r--  drivers/crypto/ccp/Makefile | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 1
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 1042
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 51
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 1077
-rw-r--r--  drivers/crypto/ccp/sev-dev.h | 63
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 17
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 43
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 375
-rw-r--r--  drivers/crypto/ccp/tee-dev.h | 110
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 43
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 58
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 24
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 6
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 8
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 39
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 17
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 103
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 8
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 10
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 59
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h | 21
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c | 65
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 28
-rw-r--r--  drivers/crypto/geode-aes.c | 24
-rw-r--r--  drivers/crypto/hifn_795x.c | 2
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 11
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 141
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 60
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 53
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 963
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 22
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 19
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 17
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 92
-rw-r--r--  drivers/crypto/img-hash.c | 6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 12
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 34
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 600
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 36
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 130
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 31
-rw-r--r--  drivers/crypto/marvell/cipher.c | 4
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 4
-rw-r--r--  drivers/crypto/mxs-dcp.c | 12
-rw-r--r--  drivers/crypto/n2_core.c | 1
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 223
-rw-r--r--  drivers/crypto/omap-aes.c | 142
-rw-r--r--  drivers/crypto/omap-aes.h | 12
-rw-r--r--  drivers/crypto/omap-crypto.c | 37
-rw-r--r--  drivers/crypto/omap-des.c | 13
-rw-r--r--  drivers/crypto/omap-sham.c | 191
-rw-r--r--  drivers/crypto/padlock-aes.c | 9
-rw-r--r--  drivers/crypto/padlock-sha.c | 26
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 30
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 6
-rw-r--r--  drivers/crypto/qce/Makefile | 7
-rw-r--r--  drivers/crypto/qce/common.c | 244
-rw-r--r--  drivers/crypto/qce/core.c | 4
-rw-r--r--  drivers/crypto/qce/dma.c | 6
-rw-r--r--  drivers/crypto/qce/dma.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 41
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
-rw-r--r--  drivers/crypto/sahara.c | 9
-rw-r--r--  drivers/crypto/stm32/Kconfig | 6
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r--  drivers/crypto/talitos.c | 15
-rw-r--r--  drivers/crypto/ux500/Kconfig | 16
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 8
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 3
-rw-r--r--  drivers/devfreq/Kconfig | 21
-rw-r--r--  drivers/devfreq/Makefile | 1
-rw-r--r--  drivers/devfreq/devfreq-event.c | 4
-rw-r--r--  drivers/devfreq/devfreq.c | 166
-rw-r--r--  drivers/devfreq/event/Kconfig | 6
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 2
-rw-r--r--  drivers/devfreq/event/exynos-nocp.h | 2
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 15
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.h | 2
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c | 5
-rw-r--r--  drivers/devfreq/exynos-bus.c | 155
-rw-r--r--  drivers/devfreq/imx8m-ddrc.c | 471
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 19
-rw-r--r--  drivers/dma/Kconfig | 30
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/altera-msgdma.c | 4
-rw-r--r--  drivers/dma/bcm2835-dma.c | 5
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 10
-rw-r--r--  drivers/dma/dma-jz4780.c | 7
-rw-r--r--  drivers/dma/dmaengine.c | 628
-rw-r--r--  drivers/dma/dmaengine.h | 11
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 8
-rw-r--r--  drivers/dma/fsl-edma-common.c | 5
-rw-r--r--  drivers/dma/fsl-edma-common.h | 1
-rw-r--r--  drivers/dma/fsl-edma.c | 8
-rw-r--r--  drivers/dma/fsl-qdma.c | 2
-rw-r--r--  drivers/dma/hisi_dma.c | 611
-rw-r--r--  drivers/dma/idxd/Makefile | 2
-rw-r--r--  drivers/dma/idxd/cdev.c | 302
-rw-r--r--  drivers/dma/idxd/device.c | 693
-rw-r--r--  drivers/dma/idxd/dma.c | 217
-rw-r--r--  drivers/dma/idxd/idxd.h | 316
-rw-r--r--  drivers/dma/idxd/init.c | 533
-rw-r--r--  drivers/dma/idxd/irq.c | 261
-rw-r--r--  drivers/dma/idxd/registers.h | 336
-rw-r--r--  drivers/dma/idxd/submit.c | 95
-rw-r--r--  drivers/dma/idxd/sysfs.c | 1528
-rw-r--r--  drivers/dma/imx-sdma.c | 37
-rw-r--r--  drivers/dma/ioat/init.c | 38
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c | 3
-rw-r--r--  drivers/dma/of-dma.c | 2
-rw-r--r--  drivers/dma/owl-dma.c | 3
-rw-r--r--  drivers/dma/pl330.c | 16
-rw-r--r--  drivers/dma/plx_dma.c | 639
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 24
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 4
-rw-r--r--  drivers/dma/sun4i-dma.c | 48
-rw-r--r--  drivers/dma/ti/Kconfig | 24
-rw-r--r--  drivers/dma/ti/Makefile | 3
-rw-r--r--  drivers/dma/ti/edma.c | 39
-rw-r--r--  drivers/dma/ti/k3-psil-am654.c | 175
-rw-r--r--  drivers/dma/ti/k3-psil-j721e.c | 222
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 43
-rw-r--r--  drivers/dma/ti/k3-psil.c | 90
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 1198
-rw-r--r--  drivers/dma/ti/k3-udma-private.c | 133
-rw-r--r--  drivers/dma/ti/k3-udma.c | 3432
-rw-r--r--  drivers/dma/ti/k3-udma.h | 151
-rw-r--r--  drivers/dma/virt-dma.c | 10
-rw-r--r--  drivers/dma/virt-dma.h | 27
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 24
-rw-r--r--  drivers/edac/Kconfig | 3
-rw-r--r--  drivers/edac/amd64_edac.c | 65
-rw-r--r--  drivers/edac/amd64_edac.h | 3
-rw-r--r--  drivers/edac/aspeed_edac.c | 4
-rw-r--r--  drivers/edac/i3000_edac.c | 2
-rw-r--r--  drivers/edac/i3200_edac.c | 2
-rw-r--r--  drivers/edac/i5100_edac.c | 7
-rw-r--r--  drivers/edac/i82975x_edac.c | 2
-rw-r--r--  drivers/edac/ie31200_edac.c | 2
-rw-r--r--  drivers/edac/mce_amd.c | 105
-rw-r--r--  drivers/edac/sifive_edac.c | 4
-rw-r--r--  drivers/edac/skx_common.c | 2
-rw-r--r--  drivers/edac/x38_edac.c | 2
-rw-r--r--  drivers/firewire/nosy.c | 2
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 2
-rw-r--r--  drivers/firmware/efi/Kconfig | 22
-rw-r--r--  drivers/firmware/efi/arm-init.c | 107
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 1
-rw-r--r--  drivers/firmware/efi/earlycon.c | 16
-rw-r--r--  drivers/firmware/efi/efi.c | 2
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 43
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 110
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 70
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 32
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 290
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 48
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 53
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 163
-rw-r--r--  drivers/firmware/efi/libstub/pci.c | 114
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 79
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 11
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 48
-rw-r--r--  drivers/firmware/efi/memmap.c | 95
-rw-r--r--  drivers/gpio/Kconfig | 30
-rw-r--r--  drivers/gpio/Makefile | 4
-rw-r--r--  drivers/gpio/TODO | 46
-rw-r--r--  drivers/gpio/gpio-altera.c | 2
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c | 2
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 2
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 12
-rw-r--r--  drivers/gpio/gpio-creg-snps.c | 4
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 15
-rw-r--r--  drivers/gpio/gpio-logicvc.c | 170
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c | 471
-rw-r--r--  drivers/gpio/gpio-mockup.c | 16
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 1
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 3
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 8
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 5
-rw-r--r--  drivers/gpio/gpio-sama5d2-piobu.c | 1
-rw-r--r--  drivers/gpio/gpio-sifive.c | 252
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 1
-rw-r--r--  drivers/gpio/gpio-tegra.c | 21
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 13
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 36
-rw-r--r--  drivers/gpio/gpio-vx855.c | 2
-rw-r--r--  drivers/gpio/gpio-wcd934x.c | 121
-rw-r--r--  drivers/gpio/gpio-xgs-iproc.c | 3
-rw-r--r--  drivers/gpio/gpiolib-of.c | 21
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 218
-rw-r--r--  drivers/gpio/gpiolib.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 179
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 91
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 124
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h | 41
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 13
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.h | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 61
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.h | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.h | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 247
-rw-r--r--  drivers/hid/hidraw.c | 9
-rw-r--r--  drivers/hv/hv_util.c | 8
-rw-r--r--  drivers/hwmon/Kconfig | 37
-rw-r--r--  drivers/hwmon/Makefile | 3
-rw-r--r--  drivers/hwmon/adm1177.c | 288
-rw-r--r--  drivers/hwmon/adt7475.c | 5
-rw-r--r--  drivers/hwmon/drivetemp.c | 574
-rw-r--r--  drivers/hwmon/hwmon.c | 85
-rw-r--r--  drivers/hwmon/i5k_amb.c | 2
-rw-r--r--  drivers/hwmon/k10temp.c | 489
-rw-r--r--  drivers/hwmon/max31730.c | 440
-rw-r--r--  drivers/hwmon/nct7802.c | 75
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 32
-rw-r--r--  drivers/hwmon/pmbus/Makefile | 2
-rw-r--r--  drivers/hwmon/pmbus/ibm-cffps.c | 89
-rw-r--r--  drivers/hwmon/pmbus/max20730.c | 372
-rw-r--r--  drivers/hwmon/pmbus/max20751.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pmbus.c | 6
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 15
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 22
-rw-r--r--  drivers/hwmon/pmbus/pxe1610.c | 44
-rw-r--r--  drivers/hwmon/pmbus/tps53679.c | 46
-rw-r--r--  drivers/hwmon/pmbus/ucd9000.c | 39
-rw-r--r--  drivers/hwmon/pmbus/xdpe12284.c | 117
-rw-r--r--  drivers/hwmon/pwm-fan.c | 15
-rw-r--r--  drivers/hwmon/w83627ehf.c | 1969
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-highlander.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-iop3xx.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 38
-rw-r--r--  drivers/i3c/master.c | 4
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c | 20
-rw-r--r--  drivers/i3c/master/i3c-master-cdns.c | 53
-rw-r--r--  drivers/idle/intel_idle.c | 484
-rw-r--r--  drivers/iio/adc/ad7124.c | 12
-rw-r--r--  drivers/iio/chemical/Kconfig | 1
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 3
-rw-r--r--  drivers/iio/industrialio-buffer.c | 6
-rw-r--r--  drivers/iio/light/vcnl4000.c | 3
-rw-r--r--  drivers/infiniband/core/umem.c | 27
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 29
-rw-r--r--  drivers/infiniband/core/verbs.c | 41
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 12
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_tid.h | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_tx.h | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 63
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 33
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 167
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 2
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 2
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 2
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 7
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 12
-rw-r--r--  drivers/input/evdev.c | 5
-rw-r--r--  drivers/input/keyboard/pxa930_rotary.c | 2
-rw-r--r--  drivers/input/keyboard/sh_keysc.c | 2
-rw-r--r--  drivers/input/misc/keyspan_remote.c | 9
-rw-r--r--  drivers/input/misc/max77650-onkey.c | 7
-rw-r--r--  drivers/input/misc/pm8xxx-vibrator.c | 2
-rw-r--r--  drivers/input/mouse/pxa930_trkball.c | 2
-rw-r--r--  drivers/input/rmi4/rmi_f54.c | 43
-rw-r--r--  drivers/input/rmi4/rmi_smbus.c | 2
-rw-r--r--  drivers/input/serio/gscps2.c | 2
-rw-r--r--  drivers/input/tablet/aiptek.c | 8
-rw-r--r--  drivers/input/tablet/gtco.c | 13
-rw-r--r--  drivers/input/tablet/pegasus_notetaker.c | 2
-rw-r--r--  drivers/input/touchscreen/sun4i-ts.c | 6
-rw-r--r--  drivers/input/touchscreen/sur40.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 26
-rw-r--r--  drivers/iommu/intel-iommu.c | 3
-rw-r--r--  drivers/ipack/carriers/tpci200.c | 4
-rw-r--r--  drivers/ipack/devices/ipoctal.c | 6
-rw-r--r--  drivers/irqchip/Kconfig | 14
-rw-r--r--  drivers/irqchip/Makefile | 5
-rw-r--r--  drivers/irqchip/irq-aspeed-scu-ic.c | 239
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 698
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 24
-rw-r--r--  drivers/irqchip/irq-imx-intmux.c | 309
-rw-r--r--  drivers/irqchip/irq-ingenic.c | 6
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 1
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 137
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-nvic.c | 15
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 2
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 30
-rw-r--r--  drivers/leds/leds-as3645a.c | 3
-rw-r--r--  drivers/leds/leds-gpio.c | 10
-rw-r--r--  drivers/leds/leds-lm3532.c | 3
-rw-r--r--  drivers/leds/leds-max77650.c | 7
-rw-r--r--  drivers/leds/leds-rb532.c | 1
-rw-r--r--  drivers/leds/trigger/ledtrig-pattern.c | 4
-rw-r--r--  drivers/lightnvm/pblk-trace.h | 8
-rw-r--r--  drivers/md/bcache/bcache.h | 2
-rw-r--r--  drivers/md/bcache/bset.c | 5
-rw-r--r--  drivers/md/bcache/btree.c | 24
-rw-r--r--  drivers/md/bcache/btree.h | 2
-rw-r--r--  drivers/md/bcache/journal.c | 80
-rw-r--r--  drivers/md/bcache/super.c | 136
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/md-bitmap.c | 25
-rw-r--r--  drivers/md/md.c | 254
-rw-r--r--  drivers/md/md.h | 45
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 111
-rw-r--r--  drivers/md/raid5.c | 21
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-vmalloc.c | 2
-rw-r--r--  drivers/media/pci/cx18/cx18-driver.c | 2
-rw-r--r--  drivers/media/pci/ivtv/ivtv-driver.c | 6
-rw-r--r--  drivers/media/pci/ivtv/ivtvfb.c | 2
-rw-r--r--  drivers/media/platform/davinci/dm355_ccdc.c | 2
-rw-r--r--  drivers/media/platform/davinci/dm644x_ccdc.c | 2
-rw-r--r--  drivers/media/platform/davinci/isif.c | 2
-rw-r--r--  drivers/media/platform/tegra-cec/tegra_cec.c | 2
-rw-r--r--  drivers/message/fusion/mptctl.c | 213
-rw-r--r--  drivers/message/fusion/mptlan.c | 2
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 2
-rw-r--r--  drivers/misc/enclosure.c | 3
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 12
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 2
-rw-r--r--  drivers/misc/pti.c | 2
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 1
-rw-r--r--  drivers/mmc/core/block.c | 6
-rw-r--r--  drivers/mmc/core/core.c | 10
-rw-r--r--  drivers/mmc/core/host.c | 33
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 34
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 31
-rw-r--r--  drivers/mmc/host/Kconfig | 6
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 2
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 7
-rw-r--r--  drivers/mmc/host/bcm2835.c | 12
-rw-r--r--  drivers/mmc/host/cavium-thunderx.c | 16
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 8
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 10
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c | 4
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 15
-rw-r--r--  drivers/mmc/host/mmci.c | 114
-rw-r--r--  drivers/mmc/host/mmci.h | 10
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 3
-rw-r--r--  drivers/mmc/host/mvsdio.c | 6
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 11
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 6
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 10
-rw-r--r--  drivers/mmc/host/owl-mmc.c | 6
-rw-r--r--  drivers/mmc/host/pxamci.c | 26
-rw-r--r--  drivers/mmc/host/renesas_sdhi.h | 10
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 22
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 25
-rw-r--r--  drivers/mmc/host/s3cmci.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-brcmstb.c | 270
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 18
-rw-r--r--  drivers/mmc/host/sdhci-milbeaut.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 139
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 112
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 248
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 60
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-sirf.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-spear.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 397
-rw-r--r--  drivers/mmc/host/sdhci.h | 13
-rw-r--r--  drivers/mmc/host/sdhci_am654.c | 58
-rw-r--r--  drivers/mmc/host/sdhci_f_sdh30.c | 4
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 12
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 3
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 6
-rw-r--r--  drivers/mmc/host/uniphier-sd.c | 14
-rw-r--r--  drivers/mmc/host/usdhi6rol0.c | 27
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 2
-rw-r--r--  drivers/mtd/devices/bcm47xxsflash.c | 2
-rw-r--r--  drivers/mtd/maps/amd76xrom.c | 2
-rw-r--r--  drivers/mtd/maps/ck804xrom.c | 2
-rw-r--r--  drivers/mtd/maps/esb2rom.c | 2
-rw-r--r--  drivers/mtd/maps/ichxrom.c | 2
-rw-r--r--  drivers/mtd/maps/intel_vr_nor.c | 4
-rw-r--r--  drivers/mtd/maps/l440gx.c | 2
-rw-r--r--  drivers/mtd/maps/netsc520.c | 4
-rw-r--r--  drivers/mtd/maps/nettel.c | 8
-rw-r--r--  drivers/mtd/maps/pci.c | 4
-rw-r--r--  drivers/mtd/maps/sc520cdp.c | 8
-rw-r--r--  drivers/mtd/maps/scb2_flash.c | 2
-rw-r--r--  drivers/mtd/maps/ts5500_flash.c | 4
-rw-r--r--  drivers/mtd/nand/raw/au1550nd.c | 2
-rw-r--r--  drivers/mtd/nand/raw/denali_pci.c | 6
-rw-r--r--  drivers/mtd/nand/raw/fsl_upm.c | 2
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 11
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 6
-rw-r--r--  drivers/net/Kconfig | 55
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/appletalk/cops.c | 4
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 2
-rw-r--r--  drivers/net/arcnet/arcnet.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 122
-rw-r--r--  drivers/net/caif/caif_serial.c | 4
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/cc770/cc770_isa.c | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 12
-rw-r--r--  drivers/net/can/softing/softing_main.c | 2
-rw-r--r--  drivers/net/dsa/Kconfig | 5
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 66
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 2
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 3
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 3
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 3
-rw-r--r--  drivers/net/dsa/mt7530.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 32
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 10
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 100
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 9
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 271
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 16
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 521
-rw-r--r--  drivers/net/dsa/qca/Kconfig | 9
-rw-r--r--  drivers/net/dsa/qca/Makefile | 2
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 856
-rw-r--r--  drivers/net/dsa/qca8k.c | 3
-rw-r--r--  drivers/net/dsa/rtl8366rb.c | 3
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 125
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 36
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 1
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 5
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 8
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r--  drivers/net/ethernet/8390/8390.c | 4
-rw-r--r--  drivers/net/ethernet/8390/8390.h | 4
-rw-r--r--  drivers/net/ethernet/8390/8390p.c | 4
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 4
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 2
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 4
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 13
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 2
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 17
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 4
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 959
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 73
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 2
-rw-r--r--  drivers/net/ethernet/amd/7990.h | 2
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 13
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 2
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 21
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 4
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 4
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 4
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 14
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 15
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atlx.c | 2
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 77
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 238
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 133
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 15
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 91
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 253
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 80
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 71
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 7
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 4
-rw-r--r--  drivers/net/ethernet/dnet.c | 15
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/ethoc.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 13
-rw-r--r--  drivers/net/ethernet/fealnx.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 20
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dprtc.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 11
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 120
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 43
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 47
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 39
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 268
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 139
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 86
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 505
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 441
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 15
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/ether1.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/sni_82596.c | 6
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 73
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 218
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 300
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 2563
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 112
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 1275
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 207
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 400
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 109
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 485
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 9
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 51
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 47
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 102
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 2945
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 16
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 716
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 37
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r--  drivers/net/ethernet/jme.c | 2
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 51
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 1410
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 615
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 662
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 1349
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 147
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h | 276
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 848
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 162
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 314
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 346
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 83
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 309
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 758
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 502
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 244
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 138
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 152
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 175
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 587
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 874
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 9
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 7
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 7
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ana.h | 625
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_dev.h | 275
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_qsys.h | 270
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 8
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 6
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 382
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.h | 46
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.h | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/cls.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/crypto.h | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 89
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 65
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 106
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 260
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 144
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 498
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 116
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 6
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 2
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/packetengines/hamachi.c | 4
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 21
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 113
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 58
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 97
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 249
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 69
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 358
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 130
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 3891
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 128
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2564
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 67
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 521
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 47
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 36
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 149
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 38
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c | 2
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 12
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/Makefile | 2
-rw-r--r--  drivers/net/ethernet/realtek/atp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.h | 78
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 1485
-rw-r--r--  drivers/net/ethernet/realtek/r8169_phy_config.c | 1307
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 58
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 4
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c | 4
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 9
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 2822
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2501
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 65
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 1234
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h | 55
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 1102
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.h | 73
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 446
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 457
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.h | 30
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c | 2270
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.h | 159
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.c | 386
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.h | 32
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 558
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port_common.c | 568
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port_common.h | 57
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 20
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 7
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 592
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 851
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.h | 97
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 9
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 398
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 404
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h | 36
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 4
-rw-r--r--  drivers/net/ethernet/silan/sc92031.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 11
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 13
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 11
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 55
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs.h | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 89
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 47
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 119
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c316
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c162
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c11
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.h1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c17
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/ethernet/xscale/Kconfig14
-rw-r--r--drivers/net/ethernet/xscale/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp46x_ts.h68
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c213
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c (renamed from drivers/ptp/ptp_ixp46x.c)3
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c16
-rw-r--r--drivers/net/fjes/fjes_hw.c2
-rw-r--r--drivers/net/fjes/fjes_main.c4
-rw-r--r--drivers/net/fjes/fjes_trace.h2
-rw-r--r--drivers/net/gtp.c19
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hyperv/Makefile2
-rw-r--r--drivers/net/hyperv/hyperv_net.h21
-rw-r--r--drivers/net/hyperv/netvsc.c31
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c209
-rw-r--r--drivers/net/hyperv/netvsc_drv.c183
-rw-r--r--drivers/net/hyperv/rndis_filter.c4
-rw-r--r--drivers/net/macsec.c787
-rw-r--r--drivers/net/macvlan.c9
-rw-r--r--drivers/net/netdevsim/dev.c4
-rw-r--r--drivers/net/netdevsim/fib.c674
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c12
-rw-r--r--drivers/net/phy/aquantia_main.c7
-rw-r--r--drivers/net/phy/bcm84881.c269
-rw-r--r--drivers/net/phy/dp83640.c217
-rw-r--r--drivers/net/phy/dp83822.c18
-rw-r--r--drivers/net/phy/dp83867.c70
-rw-r--r--drivers/net/phy/dp83869.c2
-rw-r--r--drivers/net/phy/fixed_phy.c11
-rw-r--r--drivers/net/phy/lxt.c24
-rw-r--r--drivers/net/phy/marvell.c209
-rw-r--r--drivers/net/phy/marvell10g.c13
-rw-r--r--drivers/net/phy/mdio-i2c.c28
-rw-r--r--drivers/net/phy/mdio_bus.c267
-rw-r--r--drivers/net/phy/mii_timestamper.c125
-rw-r--r--drivers/net/phy/mscc.c1139
-rw-r--r--drivers/net/phy/mscc_fc_buffer.h64
-rw-r--r--drivers/net/phy/mscc_mac.h159
-rw-r--r--drivers/net/phy/mscc_macsec.h266
-rw-r--r--drivers/net/phy/phy-core.c4
-rw-r--r--drivers/net/phy/phy.c29
-rw-r--r--drivers/net/phy/phy_device.c115
-rw-r--r--drivers/net/phy/phylink.c345
-rw-r--r--drivers/net/phy/realtek.c59
-rw-r--r--drivers/net/phy/sfp-bus.c124
-rw-r--r--drivers/net/phy/sfp.c199
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/uPD60620.c7
-rw-r--r--drivers/net/ppp/ppp_async.c18
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pptp.c5
-rw-r--r--drivers/net/slip/slip.c14
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88172a.c13
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/ch9200.c24
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c28
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c142
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vxlan.c21
-rw-r--r--drivers/net/wan/Kconfig3
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c18
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_x25.c93
-rw-r--r--drivers/net/wan/ixp4xx_hss.c39
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wireguard/Makefile18
-rw-r--r--drivers/net/wireguard/allowedips.c376
-rw-r--r--drivers/net/wireguard/allowedips.h59
-rw-r--r--drivers/net/wireguard/cookie.c236
-rw-r--r--drivers/net/wireguard/cookie.h59
-rw-r--r--drivers/net/wireguard/device.c458
-rw-r--r--drivers/net/wireguard/device.h65
-rw-r--r--drivers/net/wireguard/main.c63
-rw-r--r--drivers/net/wireguard/messages.h128
-rw-r--r--drivers/net/wireguard/netlink.c642
-rw-r--r--drivers/net/wireguard/netlink.h12
-rw-r--r--drivers/net/wireguard/noise.c828
-rw-r--r--drivers/net/wireguard/noise.h137
-rw-r--r--drivers/net/wireguard/peer.c240
-rw-r--r--drivers/net/wireguard/peer.h83
-rw-r--r--drivers/net/wireguard/peerlookup.c221
-rw-r--r--drivers/net/wireguard/peerlookup.h64
-rw-r--r--drivers/net/wireguard/queueing.c53
-rw-r--r--drivers/net/wireguard/queueing.h194
-rw-r--r--drivers/net/wireguard/ratelimiter.c223
-rw-r--r--drivers/net/wireguard/ratelimiter.h19
-rw-r--r--drivers/net/wireguard/receive.c595
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c683
-rw-r--r--drivers/net/wireguard/selftest/counter.c104
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c226
-rw-r--r--drivers/net/wireguard/send.c413
-rw-r--r--drivers/net/wireguard/socket.c438
-rw-r--r--drivers/net/wireguard/socket.h44
-rw-r--r--drivers/net/wireguard/timers.c243
-rw-r--r--drivers/net/wireguard/timers.h31
-rw-r--r--drivers/net/wireguard/version.h1
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c232
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig35
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile25
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c1003
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c808
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h183
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c795
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h826
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c1075
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h279
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c4570
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h1662
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c543
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c899
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1535
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c4195
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h86
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c962
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h40
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c1124
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h897
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2468
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c1190
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h332
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c154
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h69
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c773
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h313
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h127
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c5907
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h147
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c236
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2433
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h445
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c702
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h1212
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c199
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h50
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h113
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c5810
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h4764
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c88
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h33
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c165
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c54
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c70
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c20
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c239
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c80
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c7
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c75
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c73
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h52
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h103
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c48
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c299
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h853
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c112
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h102
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h115
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c389
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c91
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h72
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h29
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c24
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c890
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.h58
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c49
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/hash.c6
-rw-r--r--drivers/net/xen-netback/interface.c10
-rw-r--r--drivers/net/xen-netback/netback.c20
-rw-r--r--drivers/net/xen-netback/xenbus.c349
-rw-r--r--drivers/nfc/pn533/i2c.c1
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/of/of_mdio.c30
-rw-r--r--drivers/opp/core.c48
-rw-r--r--drivers/opp/of.c31
-rw-r--r--drivers/opp/opp.h6
-rw-r--r--drivers/opp/ti-opp-supply.c2
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c8
-rw-r--r--drivers/parisc/sba_iommu.c4
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pci/quirks.c23
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c58
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/Kconfig3
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/broadcom/Makefile2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c120
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c414
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c226
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h148
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c269
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c709
-rw-r--r--drivers/phy/hisilicon/Kconfig16
-rw-r--r--drivers/phy/intel/Kconfig9
-rw-r--r--drivers/phy/intel/Makefile2
-rw-r--r--drivers/phy/intel/phy-intel-emmc.c284
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c2
-rw-r--r--drivers/phy/marvell/Kconfig8
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c20
-rw-r--r--drivers/phy/mediatek/Kconfig25
-rw-r--r--drivers/phy/phy-core.c49
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c319
-rw-r--r--drivers/phy/samsung/Kconfig6
-rw-r--r--drivers/phy/ti/Kconfig20
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c959
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c18
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c170
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c212
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c387
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c50
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h38
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c1
-rw-r--r--drivers/pinctrl/core.c74
-rw-r--r--drivers/pinctrl/core.h4
-rw-r--r--drivers/pinctrl/devicetree.c4
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mp.c345
-rw-r--r--drivers/pinctrl/intel/Kconfig13
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c311
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c101
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h44
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c975
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c547
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c16
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c637
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c78
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/Kconfig16
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c57
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77950.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c)26
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77951.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795.c)39
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c33
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c39
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c13
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c28
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c19
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c14
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c132
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c27
-rw-r--r--drivers/platform/x86/intel-hid.c1
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c437
-rw-r--r--drivers/platform/x86/intel_atomisp2_pm.c25
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c81
-rw-r--r--drivers/platform/x86/intel_ips.h2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c5
-rw-r--r--drivers/platform/x86/intel_pmc_core.c141
-rw-r--r--drivers/platform/x86/intel_pmc_core.h6
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c114
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c414
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.c3
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c14
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c64
-rw-r--r--drivers/platform/x86/mlx-platform.c564
-rw-r--r--drivers/platform/x86/pmc_atom.c2
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c82
-rw-r--r--drivers/pnp/isapnp/core.c25
-rw-r--r--drivers/power/avs/Kconfig16
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/qcom-cpr.c1793
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/ptp/Kconfig24
-rw-r--r--drivers/ptp/Makefile4
-rw-r--r--drivers/ptp/idt8a340_reg.h2
-rw-r--r--drivers/ptp/ptp_clock.c10
-rw-r--r--drivers/ptp/ptp_clockmatrix.c79
-rw-r--r--drivers/ptp/ptp_ines.c852
-rw-r--r--drivers/ptp/ptp_qoriq.c15
-rw-r--r--drivers/regulator/Kconfig40
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/bd71828-regulator.c807
-rw-r--r--drivers/regulator/bd718x7-regulator.c34
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/da9211-regulator.c5
-rw-r--r--drivers/regulator/helpers.c14
-rw-r--r--drivers/regulator/isl9305.c5
-rw-r--r--drivers/regulator/lp3971.c5
-rw-r--r--drivers/regulator/ltc3676.c5
-rw-r--r--drivers/regulator/mp8859.c156
-rw-r--r--drivers/regulator/mpq7920.c330
-rw-r--r--drivers/regulator/mpq7920.h69
-rw-r--r--drivers/regulator/mt6311-regulator.c5
-rw-r--r--drivers/regulator/pv88060-regulator.c5
-rw-r--r--drivers/regulator/pv88090-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c2
-rw-r--r--drivers/regulator/s2mpa01.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/regulator/slg51000-regulator.c5
-rw-r--r--drivers/regulator/sy8106a-regulator.c5
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/ti-abb-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c5
-rw-r--r--drivers/regulator/vctrl-regulator.c38
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c101
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c5
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c4
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/net/qeth_core.h98
-rw-r--r--drivers/s390/net/qeth_core_main.c522
-rw-r--r--drivers/s390/net/qeth_core_mpc.h21
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2.h1
-rw-r--r--drivers/s390/net/qeth_l2_main.c166
-rw-r--r--drivers/s390/net/qeth_l2_sys.c34
-rw-r--r--drivers/s390/net/qeth_l3.h6
-rw-r--r--drivers/s390/net/qeth_l3_main.c306
-rw-r--r--drivers/s390/net/qeth_l3_sys.c172
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c4
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c20
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/lasi700.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c4
-rw-r--r--drivers/scsi/sd.c13
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/scsi/zorro_esp.c6
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c24
-rw-r--r--drivers/soc/tegra/flowctrl.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c4
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/ti/Kconfig11
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-ringacc.c1157
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c4
-rw-r--r--drivers/soundwire/intel.c20
-rw-r--r--drivers/soundwire/intel.h13
-rw-r--r--drivers/soundwire/intel_init.c32
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-atmel.c29
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835.c47
-rw-r--r--drivers/spi/spi-bitbang.c21
-rw-r--r--drivers/spi/spi-dw-mid.c2
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-fsl-dspi.c12
-rw-r--r--drivers/spi/spi-fsl-lpspi.c36
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c27
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c284
-rw-r--r--drivers/spi/spi-img-spfi.c18
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-jcore.c2
-rw-r--r--drivers/spi/spi-meson-spicc.c25
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-npcm-fiu.c2
-rw-r--r--drivers/spi/spi-npcm-pspi.c57
-rw-r--r--drivers/spi/spi-oc-tiny.c50
-rw-r--r--drivers/spi/spi-pxa2xx.c31
-rw-r--r--drivers/spi/spi-qcom-qspi.c9
-rw-r--r--drivers/spi/spi-rspi.c23
-rw-r--r--drivers/spi/spi-sh-msiof.c471
-rw-r--r--drivers/spi/spi-sirf.c12
-rw-r--r--drivers/spi/spi-stm32-qspi.c30
-rw-r--r--drivers/spi/spi-stm32.c79
-rw-r--r--drivers/spi/spi-tegra114.c4
-rw-r--r--drivers/spi/spi-ti-qspi.c87
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-uniphier.c227
-rw-r--r--drivers/spi/spi.c24
-rw-r--r--drivers/ssb/driver_extif.c2
-rw-r--r--drivers/ssb/driver_pcicore.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c12
-rw-r--r--drivers/staging/gasket/gasket_core.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c4
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c2
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c4
-rw-r--r--drivers/staging/qlge/qlge_main.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c2
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/uwb/whc-rc.c6
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/tc/tc.c2
-rw-r--r--drivers/tee/Kconfig4
-rw-r--r--drivers/tee/Makefile1
-rw-r--r--drivers/tee/amdtee/Kconfig8
-rw-r--r--drivers/tee/amdtee/Makefile5
-rw-r--r--drivers/tee/amdtee/amdtee_if.h183
-rw-r--r--drivers/tee/amdtee/amdtee_private.h159
-rw-r--r--drivers/tee/amdtee/call.c373
-rw-r--r--drivers/tee/amdtee/core.c518
-rw-r--r--drivers/tee/amdtee/shm_pool.c93
-rw-r--r--drivers/tee/optee/Kconfig1
-rw-r--r--drivers/tee/optee/shm_pool.c15
-rw-r--r--drivers/thermal/Kconfig35
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/amlogic_thermal.c6
-rw-r--r--drivers/thermal/armada_thermal.c5
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c123
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c96
-rw-r--r--drivers/thermal/clock_cooling.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c (renamed from drivers/thermal/cpu_cooling.c)7
-rw-r--r--drivers/thermal/cpuidle_cooling.c232
-rw-r--r--drivers/thermal/db8500_thermal.c4
-rw-r--r--drivers/thermal/devfreq_cooling.c3
-rw-r--r--drivers/thermal/fair_share.c4
-rw-r--r--drivers/thermal/gov_bang_bang.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c10
-rw-r--r--drivers/thermal/max77620_thermal.c2
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c70
-rw-r--r--drivers/thermal/qoriq_thermal.c337
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c4
-rw-r--r--drivers/thermal/rcar_thermal.c9
-rw-r--r--drivers/thermal/rockchip_thermal.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c9
-rw-r--r--drivers/thermal/st/stm_thermal.c388
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/sun8i_thermal.c639
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c20
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_hwmon.c28
-rw-r--r--drivers/thermal/thermal_hwmon.h7
-rw-r--r--drivers/thermal/user_space.c4
-rw-r--r--drivers/thermal/zx2967_thermal.c1
-rw-r--r--drivers/thunderbolt/Kconfig11
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/cap.c11
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h3
-rw-r--r--drivers/thunderbolt/eeprom.c137
-rw-r--r--drivers/thunderbolt/nhi.c3
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c441
-rw-r--r--drivers/thunderbolt/tb.c227
-rw-r--r--drivers/thunderbolt/tb.h101
-rw-r--r--drivers/thunderbolt/tb_msgs.h6
-rw-r--r--drivers/thunderbolt/tb_regs.h65
-rw-r--r--drivers/thunderbolt/tmu.c383
-rw-r--r--drivers/thunderbolt/tunnel.c169
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c764
-rw-r--r--drivers/thunderbolt/xdomain.c6
-rw-r--r--drivers/tty/cyclades.c10
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/n_gsm.c2
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sb1250-duart.c4
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink.c6
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c10
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c216
-rw-r--r--drivers/usb/cdns3/debug.h2
-rw-r--r--drivers/usb/cdns3/gadget.c536
-rw-r--r--drivers/usb/cdns3/gadget.h26
-rw-r--r--drivers/usb/cdns3/trace.h93
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci.h10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c9
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.h2
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hub.c1
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debugfs.c3
-rw-r--r--drivers/usb/dwc2/gadget.c25
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc3/core.c3
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/dwc3/gadget.c56
-rw-r--r--drivers/usb/dwc3/gadget.h14
-rw-r--r--drivers/usb/dwc3/host.c6
-rw-r--r--drivers/usb/early/xhci-dbc.c2
-rw-r--r--drivers/usb/gadget/Kconfig28
-rw-r--r--drivers/usb/gadget/configfs.c43
-rw-r--r--drivers/usb/gadget/function/f_ecm.c16
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c17
-rw-r--r--drivers/usb/gadget/function/u_audio.c29
-rw-r--r--drivers/usb/gadget/legacy/Kconfig28
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c2
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/multi.c2
-rw-r--r--drivers/usb/gadget/legacy/ncm.c2
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c16
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c6
-rw-r--r--drivers/usb/host/Kconfig56
-rw-r--r--drivers/usb/host/ehci-exynos.c4
-rw-r--r--drivers/usb/host/ehci-mv.c21
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c6
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c16
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c5
-rw-r--r--drivers/usb/host/xhci-tegra.c440
-rw-r--r--drivers/usb/isp1760/isp1760-if.c4
-rw-r--r--drivers/usb/misc/usb3503.c94
-rw-r--r--drivers/usb/musb/Kconfig12
-rw-r--r--drivers/usb/musb/Makefile4
-rw-r--r--drivers/usb/musb/davinci.c57
-rw-r--r--drivers/usb/musb/jz4740.c75
-rw-r--r--drivers/usb/musb/mediatek.c582
-rw-r--r--drivers/usb/musb/musb_am335x.c44
-rw-r--r--drivers/usb/musb/musb_core.c188
-rw-r--r--drivers/usb/musb/musb_core.h20
-rw-r--r--drivers/usb/musb/musb_dma.h9
-rw-r--r--drivers/usb/musb/musb_host.c46
-rw-r--r--drivers/usb/musb/musb_io.h18
-rw-r--r--drivers/usb/musb/musb_trace.h33
-rw-r--r--drivers/usb/musb/musbhsdma.c56
-rw-r--r--drivers/usb/musb/omap2430.c164
-rw-r--r--drivers/usb/musb/sunxi.c6
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c26
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-generic.c39
-rw-r--r--drivers/usb/phy/phy-generic.h3
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c96
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c896
-rw-r--r--drivers/usb/phy/phy-ulpi.c48
-rw-r--r--drivers/usb/phy/phy.c13
-rw-r--r--drivers/usb/renesas_usbhs/common.c22
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c4
-rw-r--r--drivers/usb/renesas_usbhs/rza2.c2
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/Kconfig3
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c16
-rw-r--r--drivers/usb/serial/ir-usb.c185
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/opticon.c63
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/usb-serial-simple.c2
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/bus.c42
-rw-r--r--drivers/usb/typec/class.c52
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c5
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c6
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/displayport.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c95
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h14
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c191
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c4
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c4
-rw-r--r--drivers/video/fbdev/carminefb.c4
-rw-r--r--drivers/video/fbdev/i810/i810_main.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c4
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c4
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c4
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c4
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c4
-rw-r--r--drivers/video/fbdev/sh7760fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c4
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/fbdev/tdfxfb.c2
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c4
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/via/via-core.c2
-rw-r--r--drivers/video/fbdev/w100fb.c6
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c1
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c1
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c2
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--drivers/xen/preempt.c4
2231 files changed, 160546 insertions, 40789 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index aaef17cc6512..31cf17dee252 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -171,7 +171,7 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_RAS) += ras/
-obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 002838d23b86..cc57bab146b5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS
config ACPI_PROCESSOR_CSTATE
def_bool y
+ depends on ACPI_PROCESSOR
depends on IA64 || X86
config ACPI_PROCESSOR_IDLE
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 433376e819bb..953437a216f6 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
info->gaddr = lpit_native->residency_counter;
if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- info->iomem_addr = ioremap_nocache(info->gaddr.address,
+ info->iomem_addr = ioremap(info->gaddr.address,
info->gaddr.bit_width / 8);
if (!info->iomem_addr)
return;
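
The hunk above swaps ioremap_nocache() for plain ioremap(); on the x86 systems this ACPI driver runs on, both produce an uncached mapping, which is why the substitution is safe. Below is a minimal sketch of the map/read/unmap pattern the LPIT residency code relies on — the function name, address, and width arguments are illustrative, not taken from this file, and it assumes a 64-bit build for readq():

#include <linux/io.h>

/* Illustrative only: map a counter register uncached, read it, unmap. */
static u64 read_mmio_counter(u64 phys_addr, u32 bit_width)
{
	void __iomem *addr;
	u64 val;

	addr = ioremap(phys_addr, bit_width / 8);	/* uncached, as ioremap_nocache() was */
	if (!addr)
		return 0;

	val = (bit_width == 32) ? readl(addr) : readq(addr);
	iounmap(addr);
	return val;
}
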
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2c4dda0787e8..5379bc3f275d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+ static bool cst_control_claimed;
+ acpi_status status;
+
+ if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+ return true;
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("ACPI: Failed to claim processor _CST control\n");
+ return false;
+ }
+
+ cst_control_claimed = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cst;
+ acpi_status status;
+ u64 count;
+ int last_index = 0;
+ int i, ret = 0;
+
+ status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "No _CST\n");
+ return -ENODEV;
+ }
+
+ cst = buffer.pointer;
+
+ /* There must be at least 2 elements. */
+ if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+ acpi_handle_warn(handle, "Invalid _CST output\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ count = cst->package.elements[0].integer.value;
+
+ /* Validate the number of C-states. */
+ if (count < 1 || count != cst->package.count - 1) {
+ acpi_handle_warn(handle, "Inconsistent _CST data\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ for (i = 1; i <= count; i++) {
+ union acpi_object *element;
+ union acpi_object *obj;
+ struct acpi_power_register *reg;
+ struct acpi_processor_cx cx;
+
+ /*
+ * If there is not enough space for all C-states, skip the
+ * excess ones and log a warning.
+ */
+ if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+ acpi_handle_warn(handle,
+ "No room for more idle states (limit: %d)\n",
+ ACPI_PROCESSOR_MAX_POWER - 1);
+ break;
+ }
+
+ memset(&cx, 0, sizeof(cx));
+
+ element = &cst->package.elements[i];
+ if (element->type != ACPI_TYPE_PACKAGE)
+ continue;
+
+ if (element->package.count != 4)
+ continue;
+
+ obj = &element->package.elements[0];
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ continue;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+ obj = &element->package.elements[1];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.type = obj->integer.value;
+ /*
+ * There are known cases in which the _CST output does not
+ * contain C1, so if the type of the first state found is not
+ * C1, leave an empty slot for C1 to be filled in later.
+ */
+ if (i == 1 && cx.type != ACPI_STATE_C1)
+ last_index = 1;
+
+ cx.address = reg->address;
+ cx.index = last_index + 1;
+
+ if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+ /*
+ * In the majority of cases _CST describes C1 as
+ * a FIXED_HARDWARE C-state, but if the command
+ * line forbids using MWAIT, use ACPI_CSTATE_HALT for
+ * C1 regardless.
+ */
+ if (cx.type == ACPI_STATE_C1 &&
+ boot_option_idle_override == IDLE_NOMWAIT) {
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ }
+ } else if (cx.type == ACPI_STATE_C1) {
+ /*
+ * In the special case of C1, FIXED_HARDWARE can
+ * be handled by executing the HLT instruction.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ continue;
+ }
+ } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
+ } else {
+ continue;
+ }
+
+ if (cx.type == ACPI_STATE_C1)
+ cx.valid = 1;
+
+ obj = &element->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.latency = obj->integer.value;
+
+ obj = &element->package.elements[3];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ memcpy(&info->states[++last_index], &cx, sizeof(cx));
+ }
+
+ acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+ info->count = last_index;
+
+ end:
+ kfree(buffer.pointer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
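
Taken together, the two helpers exported above let a caller first claim _CST control from the platform firmware and then parse the per-CPU _CST package into an acpi_processor_power structure. A hedged usage sketch follows; the wrapper function and its scaffolding are hypothetical, and only the two acpi_processor_* calls come from this patch:

#include <linux/acpi.h>
#include <acpi/processor.h>

static int example_load_cstates(acpi_handle handle, u32 cpu,
				struct acpi_processor_power *power)
{
	int ret;

	/* Ask the firmware (via the FADT SMI command) to hand _CST control to the OS. */
	if (!acpi_processor_claim_cst_control())
		return -ENODEV;

	/* Fill power->states[] and power->count from the _CST package. */
	ret = acpi_processor_evaluate_cst(handle, cpu, power);
	if (ret)
		return ret;

	pr_info("cpu%u: %d idle states parsed\n", cpu, power->count);
	return 0;
}
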
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 2f380e7381d6..15c5b272e698 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
if (register_count) {
/*
* if the function of acpi_video_register is already called,
- * don't register the acpi_vide_bus again and return no error.
+ * don't register the acpi_video_bus again and return no error.
*/
goto leave;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 863ade9add6d..173447d50acf 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 54f81eac7ec9..89101e53324b 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index d5478cd4a857..ede4b9cc9e85 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 694cf206fa9a..a676daaa2da5 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 82f81501566b..7ba6e308f146 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c8652f91054e..79f292687bd6 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index fd3beea93421..38ffa2c0a496 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index bcf8f7501db7..67f282e9e0af 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 20706adbc148..a6d896cda2a5 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 1ea52576f0a2..af58cd2dc9d3 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 283614e82a20..2269e10bc21b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 7da1864798a0..e618ddfab2fd 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8def0e3d690f..9f0219a8cb98 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -260,7 +260,8 @@ struct acpi_object_index_field {
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
/******************************************************************************
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 9d78134428e3..8825394be9ab 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6e32c97cba6c..bc00b85c0a8f 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 387163b962a7..cd0f5df0ea23 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 422cd8f2b92e..6de8a1650d3d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 2043dff370b1..4c900c108f3f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index dfbf1dbd4033..734624facda3 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 5fb50634e08e..7c89b470ec81 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 49e412edd7c6..1d541bbac4a3 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7c3bd4ab60fc..e5234e001acf 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 47d2e5059849..bb9600b867ee 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index e1632b340182..aa71f65395d2 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer,
if (ACPI_FAILURE(status)
|| temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
acpi_os_printf
- ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ("Invalid address space ID: must be between 0 and %u inclusive\n",
ACPI_NUM_PREDEFINED_REGIONS - 1);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 85b34d02233e..ad17f62e51d9 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 5034fab9cf69..4b5b6e859f62 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 0d3e1ced1f57..63bc5f19fb82 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index faa38a22263a..c901f5aec739 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -243,7 +243,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
- * ` walk_state - Current method state
+ * walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a1ffed29903b..9be2a309424c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index f59b4d944f7f..cf67caff878a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 179129a2deb1..c0a14a6a2c20 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 10f32b62608e..d9c26e720cb7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
}
obj_desc->buffer_field.buffer_obj = buffer_desc;
+ obj_desc->buffer_field.is_create_field =
+ aml_opcode == AML_CREATE_FIELD_OP;
/* Reference count for buffer_desc inherits obj_desc count */
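
A note on the assignment above: of the buffer-field creation opcodes, only the generic CreateField operator (arbitrary bit offset and bit length) sets the new flag; the fixed-width variants leave it clear. A sketch of the equivalent explicit form, using the opcode names from ACPICA's amlcode.h:

    switch (aml_opcode) {
    case AML_CREATE_FIELD_OP:          /* CreateField(): arbitrary bit length */
        obj_desc->buffer_field.is_create_field = TRUE;
        break;
    case AML_CREATE_BIT_FIELD_OP:      /* Fixed-width variants */
    case AML_CREATE_BYTE_FIELD_OP:
    case AML_CREATE_WORD_FIELD_OP:
    case AML_CREATE_DWORD_FIELD_OP:
    case AML_CREATE_QWORD_FIELD_OP:
        obj_desc->buffer_field.is_create_field = FALSE;
        break;
    }
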
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 997faa10f615..d869568d55c2 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d75aae304595..5e81a1ae44cf 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index c88fd31208a5..697974e37edf 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
+ /*
+ * Disassembler: handle create field operators here.
+ *
+ * create_buffer_field is a deferred op that is typically processed in load
+ * pass 2. However, disassembly of control method contents walks the
+ * parse tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are
+ * processed in a later walk. This is a problem when a control method
+ * has the same name as the AML_CREATE object. In this case, any use of the
+ * name segment will be detected as a method call rather than a reference
+ * to a buffer field.
+ *
+ * This earlier creation during disassembly solves this issue by inserting
+ * the named object into the ACPI namespace so that references to this
+ * name are parsed as a name string rather than a method call.
+ */
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+ (walk_state->op_info->flags & AML_CREATE)) {
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ return_ACPI_STATUS(status);
+ }
+
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
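
To make the failure mode concrete, a hedged illustration in the form the surrounding code uses (the ASL fragment is invented for this note, not taken from the patch):

    /*
     * Illustrative ASL (hypothetical):
     *
     *     CreateDWordField (BUF0, 0x00, FLDX)
     *     Store (FLDX, Local0)
     *
     * If FLDX is not yet in the namespace when the Store is disassembled,
     * but a control method named FLDX exists elsewhere, the name lookup
     * matches the method and the source operand is decoded as a call,
     * FLDX (), instead of a buffer field reference. Creating the buffer
     * field during load pass 1, as in the branch above, makes the lookup
     * resolve to the named buffer field object instead.
     */
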
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 935a8e2623e4..b31457ca926c 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 39acf7b286da..9c397642fed7 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index de79f835a373..809a0c0536b5 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e2f5a05c066..8c83d8c620dc 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5c77bee5d31f..0ced84ae13e4 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 344feba29063..3e39907fedd9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c7adaa7b582..132adff1e131 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 70d21d5ec5f3..6effd8076dcc 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 917892227e09..738873e876ca 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 3ef4e27995f0..5884eba047f7 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index aa98fe07cd1b..ce1eda6beb84 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 1ff126460007..738d4b231f34 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index aee09640d710..aefc0145e583 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 279ef0557aa3..e4e012297eee 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index e528fe56b755..1a15b0087379 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 04a40d563dd6..2c39ff2a7406 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 47265b073e6f..da97fd0c6b51 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index c7af07566b7b..43711412722f 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 46a8baf28bd0..68efd704e2dc 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index ca2966bacb50..50c7aad2e86d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index f376fc00064e..a17482428b46 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index b1aeec8cac55..a5223dcaee70 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index a9bc938a3b55..47a4d9a40d6b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680..e85eb31e5075 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
- * Buffer, depending on the size of the field.
+ * Buffer, depending on the size of the field and whether the
+ * field was created by the create_field() operator.
*
******************************************************************************/
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
+ * However, all buffer fields created by the create_field() operator
+ * must remain buffers to match other AML interpreter implementations.
+ *
* Note: Field.length is in bits.
*/
buffer_length =
(acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
- if (buffer_length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width ||
+ (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+ obj_desc->buffer_field.is_create_field)) {
/* Field is too large for an Integer, create a Buffer instead */
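
The net effect of the widened condition, as a sketch (hypothetical helper name; the types and globals are the ones used in the hunk above):

    static u8 acpi_field_read_returns_buffer(union acpi_operand_object *obj_desc)
    {
        acpi_size len =
            ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);

        /* Wider than an Integer, or created by CreateField() */
        return len > acpi_gbl_integer_byte_width ||
               (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
                obj_desc->buffer_field.is_create_field);
    }

So a field no wider than the interpreter's Integer (acpi_gbl_integer_byte_width bytes) still reads back as an Integer, unless it was declared with CreateField(), which now always yields a Buffer.
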
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 95a0dcb4f7b9..ade35ff1c7ba 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 60e854965af9..717e3998fd77 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 775cd62af5b3..9ff247cba571 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 6b76be5212a4..74f8b0d0452b 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 06e35ea09823..a46d685a3ffc 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 5e4a31a11df4..03241d18ac1d 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index a4ebce417930..c8d0d75fc450 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 31385a0b2dab..55d0fa056fe7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 728d752f7adc..a4e306690a21 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c08521194b29..d15a66de26c0 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index b223d01e6bf8..3e4018678c09 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 36da5c0ef69c..912a078c60a4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index bdfe4d33b483..4d1b22971d58 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index c5aa4b0deb70..760bc7cef55a 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 7f3c3571c292..3adc0a29d890 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 4e43c8277f07..8c34f4e2ab8f 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc9e2b1c1ad9..dc66696080a5 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index a538f7799b78..f329b01672bb 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index db7f93ca539f..832a47885b99 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 75380be1c2ef..8fefa6feac2f 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 926f7e080f22..9b9aac27ff7e 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index dee3affaca49..d9be5d0545d4 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 565bd3f29f31..1b4252bdcd0b 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index b62db8ec446f..243a25add28f 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2fb9f75d71c5..07473ddfa9a9 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index cd576153257c..4d94861e6093 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c4fd97104024..134dbfadcd15 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 2919746c9041..a4b66f4b2714 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 0e97ed38973f..d5e8405e9d8f 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index c86d0770ed6e..c86c82939ebb 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9ad340f644a1..994f0b556c60 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 73e5c83c8c9f..b691fe20e384 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 61e9dfc9fe8c..e16f6a0c2c3f 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index d7c4d6e8e21e..9ba17891edb6 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f16cf5e4742c..7e74a765e785 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2f9d93122d0c..0cea9c363ace 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9a80e3b23496..237b3ddeb075 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index be86fea8e4d4..90db2d85e7f5 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 663d85e0adba..125143c41bb8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b8d007c84d32..e66abdab8f31 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index ceea6af79d12..b7f3e8603ad8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 161e60ddfb69..984129dcaa0c 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e62c7897fdf1..3b40db4ad9f3 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 207805047bc4..3cf0687b9915 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ded2779fc8ea..2480c26c5171 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 43775c5ce17c..28af49263ebf 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 15e7563829f1..ab9327f6a63c 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9b386530ffbe..c780046bf294 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index f153ca804740..fceb311995e9 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 22d8a2becdd0..c8aef0694864 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 2512f584fa3c..00efae2f95ba 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index cf91841297c2..0fe3adf6b0e5 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index ee2ee2c858f2..1bbfc8def388 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 2cf36451e46f..523b1e9b98d4 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 0041bfba9abc..907edc5edba7 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index b2abb40023a6..56d81e490a5c 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index ef1ffd36ab3f..0bb15add2245 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 4764f849cb78..0b3494ad9a70 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index c5f0b8ec70cc..dfe1ac3ae34a 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 1640685bf4ae..f8403d480318 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0782acf85722..bcba993d4dac 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index e2859d09ca2e..0edc6ef5d46d 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index bb260376bd59..99fa48722cf6 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index d64da4d9e8d0..303ab51b4fcf 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index f6cd7d4f698b..d78656d960e8 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index db897af1de05..f2ec427f4e29 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 8533fce7fa93..1b03a2747401 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1fb8327f3c3b..41bdd0278dd8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5b169b5f0f1a..0c8cb0612414 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 65beaa237669..befdd13b403b 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 558a9f3b0678..8180d1a458f5 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index b0622ec4bb85..e6dcbdc3fc6e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index b6da135d5f41..0e02f12513dc 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 30198c828ab6..3bb06935a2ad 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6f33e7c72327..fdbc397c038d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 8b4ff11d617a..46be549539e7 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index eee97a902696..3e60bdac2200 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index ad2b218039d0..0a01c08dad8a 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 1b0f68f5ed8c..05fe3470fb93 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 5839f2fa7400..a874dac7db5c 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 14de4d15e618..d366be431a84 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 0a7cf8007643..b8039954b0d1 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index f497c4b30e65..ca7c9f0144ef 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index cf769e94fe0f..653e3bb20036 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8906c80175e6..103acbbfcf9a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
- timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
+ timer_setup(&ghes->timer, ghes_poll_func, 0);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
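Dropping TIMER_DEFERRABLE here matters because a deferrable timer may be postponed while the CPU idles, which can delay polling of a hardware error source. A minimal sketch of the distinction, assuming a ghes-like context structure with an embedded timer (names here are illustrative, not the driver's):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct poll_ctx {
	struct timer_list timer;
};

static void poll_func(struct timer_list *t)
{
	/* check the error source status here, then re-arm the timer */
}

static void poll_start(struct poll_ctx *ctx)
{
	/*
	 * Flags of 0 give a regular timer that fires on schedule even on
	 * an idle CPU; TIMER_DEFERRABLE would allow an idle CPU to hold
	 * it until the next wakeup, which is wrong for error polling.
	 */
	timer_setup(&ctx->timer, poll_func, 0);
	mod_timer(&ctx->timer, jiffies + HZ);
}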
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 33f71983e001..6078064684c6 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -298,6 +298,59 @@ out:
return status;
}
+struct iort_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision) {
+ apply_id_count_workaround = true;
+ pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+ break;
+ }
+ }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+ u32 map_max = map->input_base + map->id_count;
+
+ /*
+ * The IORT specification revision D (Section 3, table 4, page 9)
+ * defines "Number of IDs" as the number of IDs in the range minus
+ * one, but the IORT code ignored the "minus one", and some firmware
+ * did too, so apply a workaround here to stay compatible with both
+ * spec-compliant and non-compliant firmware.
+ */
+ if (apply_id_count_workaround)
+ map_max--;
+
+ return map_max;
+}
+
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out)
{
@@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return -ENXIO;
}
- if (rid_in < map->input_base ||
- (rid_in >= map->input_base + map->id_count))
+ if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
return -ENXIO;
*rid_out = map->output_base + (rid_in - map->input_base);
@@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void)
return;
}
+ iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
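With the workaround in place, the range check becomes an inclusive comparison against iort_get_map_max(). A standalone sketch of the same arithmetic, reduced to the two fields involved (not the full IORT mapping structure):

#include <stdbool.h>
#include <stdint.h>

struct id_mapping {
	uint32_t input_base;
	uint32_t id_count;	/* per spec: number of IDs minus one */
};

static bool id_in_range(const struct id_mapping *map, uint32_t rid,
			bool buggy_fw)
{
	uint32_t max = map->input_base + map->id_count;

	/*
	 * Firmware that stored the raw count instead of count-minus-one
	 * describes one ID too many, so pull the upper bound back.
	 */
	if (buggy_fw)
		max--;

	return rid >= map->input_base && rid <= max;
}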
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 8f0e0c8d8c3d..15cc7d5a6185 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -38,6 +38,8 @@
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
+ ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
#define ACPI_BATTERY_DEVICE_NAME "Battery"
@@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
- return battery->full_charge_capacity && battery->design_capacity &&
+ return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
@@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int ret = 0;
+ int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
@@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (battery->capacity_now && battery->full_charge_capacity)
- val->intval = battery->capacity_now * 100/
- battery->full_charge_capacity;
+ if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
+ full_capacity = battery->full_charge_capacity;
+ else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_capacity = battery->design_capacity;
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+ full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
else
- val->intval = 0;
+ val->intval = battery->capacity_now * 100 /
+ full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
+ bool full_cap_broken = false;
+
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
- battery->bat_desc.properties = charge_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(charge_battery_props);
- } else if (battery->full_charge_capacity == 0) {
- battery->bat_desc.properties =
- energy_battery_full_cap_broken_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ charge_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = charge_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_props);
+ }
} else {
- battery->bat_desc.properties = energy_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = energy_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_props);
+ }
}
battery->bat_desc.name = acpi_device_bid(battery->device);
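The net effect of the capacity rework: the reported percentage prefers full_charge_capacity, falls back to design_capacity, and errors out when neither is usable. A standalone sketch of that selection logic, using the same 0xFFFFFFFF "unknown" sentinel as the macro above:

#include <stdint.h>

#define VALUE_UNKNOWN 0xFFFFFFFFu
#define CAPACITY_VALID(c) ((c) != 0 && (c) != VALUE_UNKNOWN)

/* returns a percentage, or -1 when no usable reference capacity exists */
static int battery_percent(uint32_t now, uint32_t full, uint32_t design)
{
	uint32_t ref = VALUE_UNKNOWN;

	if (CAPACITY_VALID(full))
		ref = full;
	else if (CAPACITY_VALID(design))
		ref = design;		/* fallback for degraded readings */

	if (now == VALUE_UNKNOWN || ref == VALUE_UNKNOWN)
		return -1;

	return (int)((uint64_t)now * 100 / ref);
}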
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index b758b45737f5..f6925f16c4a2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+ * Razer Blade Stealth 13 late 2019: notification of the LID device
+ * only happens on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 5e4a8860a9c0..b64c62bfcea5 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1321,6 +1321,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
{}
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index eb58fc475a03..387f27ef3368 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT1047", 0},
{"INT3407", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 5c7a90186e3c..1ec7b6900662 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -13,6 +13,10 @@
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
+ {"INT1040"},
+ {"INT1043"},
+ {"INT1044"},
+ {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d05be13c1022..08bc9751fe66 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1053,28 +1053,20 @@ void acpi_ec_unblock_transactions(void)
Event Management
-------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
- if (handler)
- kref_get(&handler->kref);
- return handler;
-}
-
-static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
- bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- found = true;
- break;
+ kref_get(&handler->kref);
+ mutex_unlock(&ec->mutex);
+ return handler;
}
}
mutex_unlock(&ec->mutex);
- return found ? acpi_ec_get_query_handler(handler) : NULL;
+ return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
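The rewritten lookup takes the kref while the mutex is still held; grabbing the reference after dropping the lock would race with a concurrent release of the handler. A generic sketch of this lookup-then-pin pattern (structure and names condensed from the code above):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct handler {
	struct list_head node;
	struct kref kref;
	u8 query_bit;
};

static struct handler *handler_get_by_value(struct list_head *head,
					    struct mutex *lock, u8 value)
{
	struct handler *h;

	mutex_lock(lock);
	list_for_each_entry(h, head, node) {
		if (h->query_bit == value) {
			kref_get(&h->kref);	/* pin before unlocking */
			mutex_unlock(lock);
			return h;
		}
	}
	mutex_unlock(lock);
	return NULL;
}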
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 816b0803f7fb..aaf4e8f348cf 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
+ {"INT1044", 0},
{"INT3404", 0},
{"", 0},
};
@@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = {
#define FAN_PM_OPS_PTR NULL
#endif
+#define ACPI_FPS_NAME_LEN 20
+
struct acpi_fan_fps {
u64 control;
u64 trip_point;
u64 speed;
u64 noise_level;
u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
};
struct acpi_fan_fif {
@@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
return fps1->speed - fps2->speed;
}
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = snprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+ if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
+
+ if (fps->speed == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
+
+ if (fps->noise_level == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
+
+ if (fps->power == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
+
+ return count;
+}
+
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
@@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device)
}
for (i = 0; i < fan->fps_count; i++) {
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
- struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
+ struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
+ &fan->fps[i] };
status = acpi_extract_package(&obj->package.elements[i + 1],
&format, &fps);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FPS element\n");
- break;
+ goto err;
}
}
@@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device)
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ break;
+ }
+ }
+
err:
kfree(obj);
return status;
@@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan);
if (acpi_fan_is_acpi4(device)) {
- if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
- goto end;
+ result = acpi_fan_get_fif(device);
+ if (result)
+ return result;
+
+ result = acpi_fan_get_fps(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
- goto end;
+ goto err_end;
}
}
@@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
- goto end;
+ goto err_end;
}
dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
@@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+ goto err_end;
+ }
+
+ return 0;
+
+err_end:
+ if (fan->acpi4) {
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
-end:
return result;
}
@@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
+ if (fan->acpi4) {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
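Each _FPS entry above gets its own sysfs file, and the show() callback recovers the entry via container_of() on the embedded device_attribute, so no index lookup table is needed. A condensed sketch of the idiom (struct and names are illustrative):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct fan_state {
	u64 speed;
	char name[20];
	struct device_attribute dev_attr;	/* one attribute per entry */
};

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	/* map the attribute back to its enclosing entry */
	struct fan_state *st = container_of(attr, struct fan_state, dev_attr);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", st->speed);
}

static int fan_state_add(struct device *dev, struct fan_state *st, int i)
{
	snprintf(st->name, sizeof(st->name), "state%d", i);
	st->dev_attr.attr.name = st->name;
	st->dev_attr.attr.mode = 0444;
	st->dev_attr.show = state_show;
	return sysfs_create_file(&dev->kobj, &st->dev_attr.attr);
}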
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index f31544d3656e..4ae93350b70d 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*
* Return: The cache structure and the level we terminated with.
*/
-static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
- int local_level,
- struct acpi_subtable_header *res,
- struct acpi_pptt_cache **found,
- int level, int type)
+static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+ unsigned int local_level,
+ struct acpi_subtable_header *res,
+ struct acpi_pptt_cache **found,
+ unsigned int level, int type)
{
struct acpi_pptt_cache *cache;
@@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
- pr_debug("Found cache @ level %d\n", level);
+ pr_debug("Found cache @ level %u\n", level);
*found = cache;
/*
* continue looking at this node's resource list
@@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
return local_level;
}
-static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- int *starting_level, int level,
- int type)
+static struct acpi_pptt_cache *
+acpi_find_cache_level(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *starting_level, unsigned int level,
+ int type)
{
struct acpi_subtable_header *res;
- int number_of_levels = *starting_level;
+ unsigned int number_of_levels = *starting_level;
int resource = 0;
struct acpi_pptt_cache *ret = NULL;
- int local_level;
+ unsigned int local_level;
/* walk down from processor node */
while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
@@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
unsigned int level,
struct acpi_pptt_processor **node)
{
- int total_levels = 0;
+ unsigned int total_levels = 0;
struct acpi_pptt_cache *found = NULL;
struct acpi_pptt_processor *cpu_node;
u8 acpi_type = acpi_cache_type(type);
- pr_debug("Looking for CPU %d's level %d cache type %d\n",
+ pr_debug("Looking for CPU %d's level %u cache type %d\n",
acpi_cpu_id, level, acpi_type);
cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
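The switch from int to unsigned int for cache levels avoids mixed signed/unsigned comparisons, in which the signed operand is silently converted. A small standalone illustration of the pitfall being closed off:

#include <stdio.h>

int main(void)
{
	int found_level = -1;		/* sentinel in signed code */
	unsigned int level = 1;

	/* -1 converts to UINT_MAX, so it compares greater than 1 */
	if (found_level < level)
		printf("not reached\n");
	else
		printf("-1 compares greater than %u\n", level);
	return 0;
}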
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2ae95df2e74f..dcc289e30166 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
- acpi_status status;
- u64 count;
- int current_count;
- int i, ret = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *cst;
+ int ret;
if (nocst)
return -ENODEV;
- current_count = 0;
-
- status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
- return -ENODEV;
- }
-
- cst = buffer.pointer;
-
- /* There must be at least 2 elements */
- if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- pr_err("not enough elements in _CST\n");
- ret = -EFAULT;
- goto end;
- }
-
- count = cst->package.elements[0].integer.value;
+ ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+ if (ret)
+ return ret;
- /* Validate number of power states. */
- if (count < 1 || count != cst->package.count - 1) {
- pr_err("count given by _CST is not valid\n");
- ret = -EFAULT;
- goto end;
- }
+ /*
+ * It is expected that there will be at least 2 states, C1 and
+ * something else (C2 or C3), so fail if that is not the case.
+ */
+ if (pr->power.count < 2)
+ return -EFAULT;
- /* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
-
- for (i = 1; i <= count; i++) {
- union acpi_object *element;
- union acpi_object *obj;
- struct acpi_power_register *reg;
- struct acpi_processor_cx cx;
-
- memset(&cx, 0, sizeof(cx));
-
- element = &(cst->package.elements[i]);
- if (element->type != ACPI_TYPE_PACKAGE)
- continue;
-
- if (element->package.count != 4)
- continue;
-
- obj = &(element->package.elements[0]);
-
- if (obj->type != ACPI_TYPE_BUFFER)
- continue;
-
- reg = (struct acpi_power_register *)obj->buffer.pointer;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
- (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
- continue;
-
- /* There should be an easy way to extract an integer... */
- obj = &(element->package.elements[1]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.type = obj->integer.value;
- /*
- * Some buggy BIOSes won't list C1 in _CST -
- * Let acpi_processor_get_power_info_default() handle them later
- */
- if (i == 1 && cx.type != ACPI_STATE_C1)
- current_count++;
-
- cx.address = reg->address;
- cx.index = current_count + 1;
-
- cx.entry_method = ACPI_CSTATE_SYSTEMIO;
- if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- if (acpi_processor_ffh_cstate_probe
- (pr->id, &cx, reg) == 0) {
- cx.entry_method = ACPI_CSTATE_FFH;
- } else if (cx.type == ACPI_STATE_C1) {
- /*
- * C1 is a special case where FIXED_HARDWARE
- * can be handled in non-MWAIT way as well.
- * In that case, save this _CST entry info.
- * Otherwise, ignore this info and continue.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- } else {
- continue;
- }
- if (cx.type == ACPI_STATE_C1 &&
- (boot_option_idle_override == IDLE_NOMWAIT)) {
- /*
- * In most cases the C1 space_id obtained from
- * _CST object is FIXED_HARDWARE access mode.
- * But when the option of idle=halt is added,
- * the entry_method type should be changed from
- * CSTATE_FFH to CSTATE_HALT.
- * When the option of idle=nomwait is added,
- * the C1 entry_method type should be
- * CSTATE_HALT.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- }
- } else {
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
- cx.address);
- }
-
- if (cx.type == ACPI_STATE_C1) {
- cx.valid = 1;
- }
-
- obj = &(element->package.elements[2]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.latency = obj->integer.value;
-
- obj = &(element->package.elements[3]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- current_count++;
- memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
- /*
- * We support total ACPI_PROCESSOR_MAX_POWER - 1
- * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
- */
- if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- pr_warn("Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
- break;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
- current_count));
-
- /* Validate number of power states discovered */
- if (current_count < 2)
- ret = -EFAULT;
-
- end:
- kfree(buffer.pointer);
-
- return ret;
+ return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
static inline void acpi_processor_cstate_first_run_checks(void)
{
- acpi_status status;
static int first_run;
if (first_run)
@@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void)
max_cstate);
first_run++;
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
+ if (nocst)
+ return;
+
+ acpi_processor_claim_cst_control();
}
#else
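The open-coded parser is replaced by the common acpi_processor_evaluate_cst() helper, and the SMI handshake moves into acpi_processor_claim_cst_control(). A sketch reconstructing, from the lines removed above, the handshake that the new helper centralizes:

#include <linux/acpi.h>

/*
 * Tell the firmware, via the FADT SMI command port, that the OS will
 * make use of _CST; only needed when the FADT advertises cst_control.
 */
static void claim_cst_control_sketch(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.cst_control)
		return;

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    acpi_gbl_FADT.cst_control, 8);
	if (ACPI_FAILURE(status))
		pr_warn("Notifying BIOS of _CST ability failed\n");
}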
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6747a279621b..439880629839 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = {
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
+ unsigned long acpi_wakeup_address;
+
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
+ acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
acpi_set_waking_vector(acpi_wakeup_address);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 31014c7d3793..419f814d596a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
.ident = "Apple MacBook Pro 12,1",
@@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+
+ /*
+ * Desktops which falsely report a backlight and which our heuristics
+ * for this do not catch.
+ */
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
@@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "MSI MS-7721",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+ },
+ },
{ },
};
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 46dc54d18f0b..2a04e8abd397 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
/*
* Fill in command table information. First, the header,
@@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
- n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+ acard_ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 66a570d0da83..6853dbb4131d 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -73,6 +73,7 @@ enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
BRCM_SATA_NSP,
+ BRCM_SATA_BCM7216,
};
enum brcm_ahci_quirks {
@@ -337,24 +338,39 @@ static const struct ata_port_info ahci_brcm_port_info = {
.port_ops = &ahci_brcm_platform_ops,
};
-#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
brcm_sata_phys_disable(priv);
- return ahci_platform_suspend(dev);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ ret = ahci_platform_suspend(dev);
+ else
+ ret = 0;
+
+ if (priv->version != BRCM_SATA_BCM7216)
+ reset_control_assert(priv->rcdev);
+
+ return ret;
}
-static int brcm_ahci_resume(struct device *dev)
+static int __maybe_unused brcm_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
+ int ret = 0;
+
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
/* Make sure clocks are turned on before re-configuration */
ret = ahci_platform_enable_clks(hpriv);
@@ -393,7 +409,6 @@ out_disable_phys:
ahci_platform_disable_clks(hpriv);
return ret;
}
-#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
@@ -404,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -412,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
+ const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
@@ -433,16 +450,19 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform */
- priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
- if (!IS_ERR_OR_NULL(priv->rcdev))
- reset_control_deassert(priv->rcdev);
+ /* Reset is optional depending on platform and named differently */
+ if (priv->version == BRCM_SATA_BCM7216)
+ reset_name = "rescal";
+ else
+ reset_name = "ahci";
+
+ priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+ if (IS_ERR(priv->rcdev))
+ return PTR_ERR(priv->rcdev);
hpriv = ahci_platform_get_resources(pdev, 0);
- if (IS_ERR(hpriv)) {
- ret = PTR_ERR(hpriv);
- goto out_reset;
- }
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
@@ -459,6 +479,13 @@ static int brcm_ahci_probe(struct platform_device *pdev)
break;
}
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
+
ret = ahci_platform_enable_clks(hpriv);
if (ret)
goto out_reset;
@@ -500,7 +527,7 @@ out_disable_phys:
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
- if (!IS_ERR_OR_NULL(priv->rcdev))
+ if (priv->version != BRCM_SATA_BCM7216)
reset_control_assert(priv->rcdev);
return ret;
}
@@ -521,11 +548,26 @@ static int brcm_ahci_remove(struct platform_device *pdev)
return 0;
}
+static void brcm_ahci_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ /* All resource releasing happens via devres, but our device, unlike
+ * in a proper remove, is not disappearing, so calling
+ * brcm_ahci_suspend() here, which does explicit power management, is
+ * appropriate.
+ */
+ ret = brcm_ahci_suspend(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
.remove = brcm_ahci_remove,
+ .shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
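Switching to devm_reset_control_get_optional() is what lets the IS_ERR_OR_NULL() guards go away: the optional getter returns NULL, not an error, when the named reset line is absent, and the reset_control_*() operations treat a NULL control as a no-op. A minimal probe-path sketch under those assumptions:

#include <linux/platform_device.h>
#include <linux/reset.h>

static int probe_get_reset(struct platform_device *pdev, const char *name)
{
	struct reset_control *rc;

	rc = devm_reset_control_get_optional(&pdev->dev, name);
	if (IS_ERR(rc))			/* real errors still propagate */
		return PTR_ERR(rc);

	/* rc may be NULL here; deassert on a missing reset is a no-op */
	return reset_control_deassert(rc);
}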
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 135173c8d138..391dff0f25a2 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -824,7 +824,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
acdev->pbase = res->start;
- acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ acdev->vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!acdev->vbase) {
dev_warn(&pdev->dev, "ioremap fail\n");
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 1bfd0154dad5..e47a28271f5b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv)
priv->aapl_bus_id = bidp ? *bidp : 0;
/* Fixup missing Apple bus ID in case of media-bay */
- if (priv->mediabay && bidp == 0)
+ if (priv->mediabay && !bidp)
priv->aapl_bus_id = 1;
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d3d851b014a3..bd87476ab481 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
of_node_put(dma_node);
return -EINVAL;
}
- cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
of_node_put(dma_node);
@@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs1)
return -EINVAL;
- cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+ cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
resource_size(res_cs1));
if (!cs1)
return rv;
@@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs0)
return -EINVAL;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
if (!cs0)
return rv;
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index deae466395de..479c4b29b856 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
info->gpio_line = gpiod;
info->irq = irq;
- info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+ info->iobase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!info->iobase)
return -ENOMEM;
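These devm_ioremap_nocache() conversions are mechanical: ioremap() is uncached on all architectures these days, so the _nocache variant is a pure alias slated for removal. The replacement keeps identical semantics:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *map_regs(struct device *dev, struct resource *res)
{
	/* formerly devm_ioremap_nocache(); same uncached mapping */
	return devm_ioremap(dev, res->start, resource_size(res));
}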
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 9d0d65efcd94..17d47ad03ab7 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -31,12 +31,6 @@
#include "suni.h"
#include "eni.h"
-#if !defined(__i386__) && !defined(__x86_64__)
-#ifndef ioremap_nocache
-#define ioremap_nocache(X,Y) ioremap(X,Y)
-#endif
-#endif
-
/*
* TODO:
*
@@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev)
}
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,",
dev->number,pci_dev->revision,real_base,eni_dev->irq);
- if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) {
+ if (!(base = ioremap(real_base,MAP_MAX_SIZE))) {
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index aad00d2b28f5..cc87004d5e2d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
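The three kfree(vcc) calls plug leaks on early exits that previously returned without releasing the allocation. With several such exits, a single unwind label is the usual kernel idiom; a condensed sketch of the equivalent structure (the struct and helpers here are stand-ins, not the driver's):

#include <linux/slab.h>

struct fs_vcc { int channo; };			/* condensed stand-in */

static bool no_free_channel(void) { return false; }	/* stub */
static bool channel_in_use(void) { return false; }	/* stub */

static int fs_open_sketch(void)
{
	struct fs_vcc *vcc;
	int err;

	vcc = kmalloc(sizeof(*vcc), GFP_KERNEL);
	if (!vcc)
		return -ENOMEM;

	if (no_free_channel()) {
		err = -EBUSY;
		goto out_free;		/* one cleanup path for all failures */
	}
	if (channel_in_use()) {
		err = -EBUSY;
		goto out_free;
	}
	return 0;

out_free:
	kfree(vcc);
	return err;
}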
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f1a500205313..8fbd36eb8941 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc)
static void
fore200e_close(struct atm_vcc* vcc)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
struct fore200e_vcc* fore200e_vcc;
+ struct fore200e* fore200e;
struct fore200e_vc_map* vc_map;
unsigned long flags;
ASSERT(vcc);
+ fore200e = FORE200E_DEV(vcc->dev);
+
ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
@@ -1464,10 +1466,10 @@ fore200e_close(struct atm_vcc* vcc)
static int
fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
- struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
+ struct fore200e* fore200e;
+ struct fore200e_vcc* fore200e_vcc;
struct fore200e_vc_map* vc_map;
- struct host_txq* txq = &fore200e->host_txq;
+ struct host_txq* txq;
struct host_txq_entry* entry;
struct tpd* tpd;
struct tpd_haddr tpd_haddr;
@@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned char* data;
unsigned long flags;
- ASSERT(vcc);
- ASSERT(fore200e);
- ASSERT(fore200e_vcc);
+ if (!vcc)
+ return -EINVAL;
+
+ fore200e = FORE200E_DEV(vcc->dev);
+ fore200e_vcc = FORE200E_VCC(vcc);
+
+ if (!fore200e)
+ return -EINVAL;
+
+ txq = &fore200e->host_txq;
+ if (!fore200e_vcc)
+ return -EINVAL;
if (!test_bit(ATM_VF_READY, &vcc->flags)) {
DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 4a66888e7253..5fa7ce3745a0 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
- echo " .p2align $(ASM_ALIGN)" ;\
+ echo " .p2align 4" ;\
echo "_fw_$(FWSTR)_bin:" ;\
echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
echo "_fw_end:" ;\
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 48616f358854..16134a69bf6f 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev)
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
+ trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
@@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
}
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 70a9edb5f525..27f3e60608e5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ac9b31c57967..008f8da69d97 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
.reg_write = regmap_smbus_byte_reg_write,
.reg_read = regmap_smbus_byte_reg_read,
};
@@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
.reg_write = regmap_smbus_word_reg_write,
.reg_read = regmap_smbus_word_reg_read,
};
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
return i2c_smbus_write_word_swapped(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
.reg_write = regmap_smbus_word_write_swapped,
.reg_read = regmap_smbus_word_read_swapped,
};
@@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context,
return -EIO;
}
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
@@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
return -EIO;
}
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 19f57ccfbe1d..59f911e57719 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
- /* Check for unwritable registers before we start */
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_writeable(map,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
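"noinc" registers are FIFO-style data ports that must not be swept by an auto-incrementing bulk write, which is why the range check now rejects them inside a raw write while still permitting a write aimed at the noinc register itself. Such registers are reached through the dedicated accessor; a usage sketch with a hypothetical register address:

#include <linux/regmap.h>

#define FIFO_REG 0x10		/* hypothetical noinc data port */

static int push_to_fifo(struct regmap *map, const u8 *buf, size_t len)
{
	/*
	 * regmap_noinc_write() sends len bytes to the same address
	 * instead of incrementing through a register range.
	 */
	return regmap_noinc_write(map, FIFO_REG, buf, len);
}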
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index d8d0dc0ca5ac..0b081dee1e95 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop)
if (!prop->length)
return NULL;
- if (prop->is_array)
- return prop->pointer;
-
- return &prop->value;
+ return prop->is_inline ? &prop->value : prop->pointer;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
- const void *pointer = property_get_pointer(p);
const char * const *src_str;
size_t i, nval;
- if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer) {
- src_str = p->pointer;
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(src_str[i]);
- }
- kfree(pointer);
- } else if (p->type == DEV_PROP_STRING) {
- kfree(p->value.str);
+ if (p->type == DEV_PROP_STRING) {
+ src_str = property_get_pointer(p);
+ nval = p->length / sizeof(*src_str);
+ for (i = 0; i < nval; i++)
+ kfree(src_str[i]);
}
+
+ if (!p->is_inline)
+ kfree(p->pointer);
+
kfree(p->name);
}
-static const char * const *
-property_copy_string_array(const struct property_entry *src)
+static bool property_copy_string_array(const char **dst_ptr,
+ const char * const *src_ptr,
+ size_t nval)
{
- const char **d;
- const char * const *src_str = src->pointer;
- size_t nval = src->length / sizeof(*d);
int i;
- d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
- if (!d)
- return NULL;
-
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src_str[i], GFP_KERNEL);
- if (!d[i] && src_str[i]) {
+ dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+ if (!dst_ptr[i] && src_ptr[i]) {
while (--i >= 0)
- kfree(d[i]);
- kfree(d);
- return NULL;
+ kfree(dst_ptr[i]);
+ return false;
}
}
- return d;
+ return true;
}
static int property_entry_copy_data(struct property_entry *dst,
const struct property_entry *src)
{
const void *pointer = property_get_pointer(src);
- const void *new;
-
- if (src->is_array) {
- if (!src->length)
- return -ENODATA;
-
- if (src->type == DEV_PROP_STRING) {
- new = property_copy_string_array(src);
- if (!new)
- return -ENOMEM;
- } else {
- new = kmemdup(pointer, src->length, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- }
+ void *dst_ptr;
+ size_t nval;
+
+ /*
+ * Properties with no data should not be marked as stored
+ * out of line.
+ */
+ if (!src->is_inline && !src->length)
+ return -ENODATA;
+
+ /*
+ * Reference properties are never stored inline as
+ * they are too big.
+ */
+ if (src->type == DEV_PROP_REF && src->is_inline)
+ return -EINVAL;
- dst->is_array = true;
- dst->pointer = new;
- } else if (src->type == DEV_PROP_STRING) {
- new = kstrdup(src->value.str, GFP_KERNEL);
- if (!new && src->value.str)
+ if (src->length <= sizeof(dst->value)) {
+ dst_ptr = &dst->value;
+ dst->is_inline = true;
+ } else {
+ dst_ptr = kmalloc(src->length, GFP_KERNEL);
+ if (!dst_ptr)
return -ENOMEM;
+ dst->pointer = dst_ptr;
+ }
- dst->value.str = new;
+ if (src->type == DEV_PROP_STRING) {
+ nval = src->length / sizeof(const char *);
+ if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+ if (!dst->is_inline)
+ kfree(dst->pointer);
+ return -ENOMEM;
+ }
} else {
- dst->value = src->value;
+ memcpy(dst_ptr, pointer, src->length);
}
dst->length = src->length;
dst->type = src->type;
dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- goto out_free_data;
+ if (!dst->name) {
+ property_entry_free_data(dst);
+ return -ENOMEM;
+ }
return 0;
-
-out_free_data:
- property_entry_free_data(dst);
- return -ENOMEM;
}
/**
@@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
struct fwnode_reference_args *args)
{
struct swnode *swnode = to_swnode(fwnode);
- const struct software_node_reference *ref;
+ const struct software_node_ref_args *ref_array;
+ const struct software_node_ref_args *ref;
const struct property_entry *prop;
struct fwnode_handle *refnode;
+ u32 nargs_prop_val;
+ int error;
int i;
- if (!swnode || !swnode->node->references)
+ if (!swnode)
return -ENOENT;
- for (ref = swnode->node->references; ref->name; ref++)
- if (!strcmp(ref->name, propname))
- break;
+ prop = property_entry_get(swnode->node->properties, propname);
+ if (!prop)
+ return -ENOENT;
+
+ if (prop->type != DEV_PROP_REF)
+ return -EINVAL;
- if (!ref->name || index > (ref->nrefs - 1))
+ /*
+ * We expect that references are never stored inline, even
+ * single ones, as they are too big.
+ */
+ if (prop->is_inline)
+ return -EINVAL;
+
+ if (index * sizeof(*ref) >= prop->length)
return -ENOENT;
- refnode = software_node_fwnode(ref->refs[index].node);
+ ref_array = prop->pointer;
+ ref = &ref_array[index];
+
+ refnode = software_node_fwnode(ref->node);
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- prop = property_entry_get(swnode->node->properties, nargs_prop);
- if (!prop)
- return -EINVAL;
+ error = property_entry_read_int_array(swnode->node->properties,
+ nargs_prop, sizeof(u32),
+ &nargs_prop_val, 1);
+ if (error)
+ return error;
- nargs = prop->value.u32_data;
+ nargs = nargs_prop_val;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
@@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
args->nargs = nargs;
for (i = 0; i < nargs; i++)
- args->args[i] = ref->refs[index].args[i];
+ args->args[i] = ref->args[i];
return 0;
}
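After this rework a property entry stores values that fit in the union inline and larger data behind a pointer, with is_inline telling them apart; reference properties are always out of line. Construction still goes through the PROPERTY_ENTRY_* helpers, including the PROPERTY_ENTRY_REF() form added alongside DEV_PROP_REF in this series; a sketch with hypothetical property names:

#include <linux/property.h>

static const struct software_node gpio_node = { .name = "gpio-ctrl" };
static const u32 demo_map[] = { 1, 2, 3 };

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),	/* fits inline */
	PROPERTY_ENTRY_U32_ARRAY("gpio-map", demo_map),	/* out of line */
	PROPERTY_ENTRY_REF("ctrl-gpios", &gpio_node, 0, 1),
	{ }
};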
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 86e85daa80bf..305c7751184a 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE
The module name will be test_async_driver_probe.ko
If unsure say N.
+config KUNIT_DRIVER_PE_TEST
+ bool "KUnit Tests for property entry API"
+ depends on KUNIT=y
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 0f1f7277a013..3ca56367c84b 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+
+obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
new file mode 100644
index 000000000000..abe03315180f
--- /dev/null
+++ b/drivers/base/test/property-entry-test.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8("prop-u8", 8),
+ PROPERTY_ENTRY_U16("prop-u16", 16),
+ PROPERTY_ENTRY_U32("prop-u32", 32),
+ PROPERTY_ENTRY_U64("prop-u64", 64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[2];
+ u16 val_u16, array_u16[2];
+ u32 val_u32, array_u32[2];
+ u64 val_u64, array_u64[2];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+ static const u8 a_u8[16] = { 8, 9 };
+ static const u16 a_u16[16] = { 16, 17 };
+ static const u32 a_u32[16] = { 32, 33 };
+ static const u64 a_u64[16] = { 64, 65 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+ PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+ PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+ PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[32];
+ u16 val_u16, array_u16[32];
+ u32 val_u32, array_u32[32];
+ u64 val_u64, array_u64[32];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+ static const char *strings[] = {
+ "string-a",
+ "string-b",
+ };
+
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING("str", "single"),
+ PROPERTY_ENTRY_STRING("empty", ""),
+ PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ const char *str;
+ const char *strs[10];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_string(node, "str", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "single");
+
+ error = fwnode_property_read_string_array(node, "str", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ /* asking for more data returns what we have */
+ error = fwnode_property_read_string_array(node, "str", strs, 2);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ error = fwnode_property_read_string(node, "no-str", &str);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+ KUNIT_EXPECT_LT(test, error, 0);
+
+ error = fwnode_property_read_string(node, "empty", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 3);
+ KUNIT_EXPECT_EQ(test, error, 2);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+ KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+ /* NULL argument -> returns size */
+ error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ /* accessing array as single value */
+ error = fwnode_property_read_string(node, "strs", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_BOOL("prop"),
+ { }
+ };
+
+ struct fwnode_handle *node;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+ KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+ fwnode_remove_software_node(node);
+}
+
+/* Verifies that small U8 array is stored inline when property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+ static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+ static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+ PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+ { }
+ };
+
+ struct property_entry *copy;
+ const u8 *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ data_ptr = (u8 *)&copy[0].value;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+
+ property_entries_free(copy);
+}
+
+/* Verifies that single string array is stored inline when property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+ static char *str_array_small[] = { "a" };
+ static char *str_array_big[] = { "b", "c", "d", "e" };
+ static char *str_array_small_empty[] = { "" };
+ static struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+ PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+ PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+ { }
+ };
+
+ struct property_entry *copy;
+ const char * const *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+ KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+ KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+ property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+ static const struct software_node nodes[] = {
+ { .name = "1", },
+ { .name = "2", },
+ { }
+ };
+
+ static const struct software_node_ref_args refs[] = {
+ {
+ .node = &nodes[0],
+ .nargs = 0,
+ },
+ {
+ .node = &nodes[1],
+ .nargs = 2,
+ .args = { 3, 4 },
+ },
+ };
+
+ const struct property_entry entries[] = {
+ PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+ PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ struct fwnode_reference_args ref;
+ int error;
+
+ error = software_node_register_nodes(nodes);
+ KUNIT_ASSERT_EQ(test, error, 0);
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 1, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+ /* asking for more args, padded with zero data */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 2, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* array of references */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* second reference in the array */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 2, 1, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 2, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+ software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+ KUNIT_CASE(pe_test_uints),
+ KUNIT_CASE(pe_test_uint_arrays),
+ KUNIT_CASE(pe_test_strings),
+ KUNIT_CASE(pe_test_bool),
+ KUNIT_CASE(pe_test_move_inline_u8),
+ KUNIT_CASE(pe_test_move_inline_str),
+ KUNIT_CASE(pe_test_reference),
+ { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+ .name = "property-entry",
+ .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suite(property_entry_test_suite);
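
As a usage note: with the Kconfig and Makefile hooks above in place, this suite can be run under the KUnit UML wrapper, assuming a .kunitconfig along the lines of:

CONFIG_KUNIT=y
CONFIG_KUNIT_DRIVER_PE_TEST=y

followed by an invocation such as ./tools/testing/kunit/kunit.py run; the exact command depends on the KUnit tooling present in the tree.
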
diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c
index 57f10b58b47c..c153c96a6145 100644
--- a/drivers/bcma/driver_chipcommon_b.c
+++ b/drivers/bcma/driver_chipcommon_b.c
@@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
return 0;
ccb->setup_done = 1;
- ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
+ ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE);
if (!ccb->mii)
return -ENOMEM;
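
The bcma conversions here (and the similar ones in the hunks below) are part of the tree-wide removal of ioremap_nocache(), which on the affected architectures had become a plain alias for ioremap(); both return an uncached MMIO mapping, so behavior is unchanged. A minimal sketch of the resulting idiom, with illustrative names:

	void __iomem *regs;

	regs = ioremap(base, size);	/* uncached MMIO mapping, as before */
	if (!regs)
		return -ENOMEM;

	/* ... readl(regs + OFFSET), writel(val, regs + OFFSET) ... */

	iounmap(regs);
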
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index c42cec7c7ecc..88a93c266c19 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+ io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
resource_size(&pc_host->mem_resource));
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index c8073b509a2b..90d5bdc12e03 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* iomap only first core. We have to read some register on this core
* to scan the bus.
*/
- bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+ bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
if (!bus->mmio)
return -ENOMEM;
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 4a2d1b235fb5..1a942f734188 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
- u32 addrl, addrh, sizel, sizeh = 0;
+ u32 addrl, addrh, sizeh = 0;
u32 size;
u32 ent = bcma_erom_get_ent(bus, eromptr);
@@ -239,12 +239,9 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
size = bcma_erom_get_ent(bus, eromptr);
- sizel = size & SCAN_SIZE_SZ;
if (size & SCAN_SIZE_SG32)
sizeh = bcma_erom_get_ent(bus, eromptr);
- } else
- sizel = SCAN_ADDR_SZ_BASE <<
- ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
+ }
return addrl;
}
@@ -425,11 +422,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
}
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+ core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE);
if (!core->io_addr)
return -ENOMEM;
if (core->wrap) {
- core->io_wrap = ioremap_nocache(core->wrap,
+ core->io_wrap = ioremap(core->wrap,
BCMA_CORE_SIZE);
if (!core->io_wrap) {
iounmap(core->io_addr);
@@ -472,7 +469,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ eromptr = ioremap(erombase, BCMA_CORE_SIZE);
if (!eromptr)
return -ENOMEM;
} else {
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 5cf49d9db95e..ed34785dd64b 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
/* Writes must be at the write pointer position */
if (sector != zone->wp)
return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_EMPTY)
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += nr_sectors;
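
In summary, the hunk above extends the write path so that all writable zone conditions are handled uniformly:

/*
 * EMPTY / IMP_OPEN / EXP_OPEN / CLOSED:
 *   - a write must land exactly at the zone's write pointer (wp),
 *     otherwise the command fails with BLK_STS_IOERR;
 *   - on success the zone condition becomes IMP_OPEN, except that an
 *     explicitly opened zone (EXP_OPEN) keeps its condition;
 *   - wp advances by the number of sectors written.
 */
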
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 1f3f9e0f02a8..4eaf97d7a170 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_csr;
}
- card->csr_remap = ioremap_nocache(csr_base, csr_len);
+ card->csr_remap = ioremap(csr_base, csr_len);
if (!card->csr_remap) {
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to remap memory region\n");
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 8e05706fe5d9..1f498f358f60 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -36,6 +36,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reading device address failed (%d)", err);
return err;
}
@@ -107,6 +108,52 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Read PCM int params failed (%d)", err);
+ return err;
+ }
+
+ if (skb->len != 6 || skb->data[0]) {
+ bt_dev_err(hdev, "BCM: Read PCM int params length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ if (params)
+ memcpy(params, skb->data + 1, 5);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_read_pcm_int_params);
+
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Write PCM int params failed (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_write_pcm_int_params);
+
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
const struct hci_command_hdr *cmd;
@@ -177,6 +224,7 @@ static int btbcm_reset(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reset failed (%d)", err);
return err;
}
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d204be8a84bf..014ef847a486 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -54,6 +54,10 @@ struct bcm_set_pcm_format_params {
int btbcm_check_bdaddr(struct hci_dev *hdev);
int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params);
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params);
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
@@ -74,6 +78,18 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
return -EOPNOTSUPP;
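
A sketch of how a caller might use the two new vendor-command helpers; the real user is bcm_setup() in hci_bcm.c further down, and the function name and cfg parameter here are illustrative:

static int example_apply_pcm_params(struct hci_dev *hdev, const u8 cfg[5])
{
	struct bcm_set_pcm_int_params params;
	int err;

	/* fetch the controller's current PCM interface parameters */
	err = btbcm_read_pcm_int_params(hdev, &params);
	if (err)
		return err;

	/* overwrite with the platform-provided 5-byte configuration */
	memcpy(&params, cfg, 5);
	return btbcm_write_pcm_int_params(hdev, &params);
}
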
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index f838537f9f89..577cfa3329db 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
* the end.
*/
len = patch_length;
- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
- GFP_KERNEL);
+ buf = kvmalloc(patch_length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf;
@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
- if (!*buff)
+ *buff = kvmalloc(fw->size, GFP_KERNEL);
+ if (*buff)
+ memcpy(*buff, fw->data, ret);
+ else
ret = -ENOMEM;
release_firmware(fw);
@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
goto out;
if (btrtl_dev->cfg_len > 0) {
- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
}
memcpy(tbuff, fw_data, ret);
- kfree(fw_data);
+ kvfree(fw_data);
memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
ret += btrtl_dev->cfg_len;
@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- kfree(fw_data);
+ kvfree(fw_data);
return ret;
}
void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
- kfree(btrtl_dev->fw_data);
- kfree(btrtl_dev->cfg_data);
+ kvfree(btrtl_dev->fw_data);
+ kvfree(btrtl_dev->cfg_data);
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fd9571d5fdac..199e8f7d426d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -145,11 +145,20 @@ static int btsdio_rx_packet(struct btsdio_data *data)
data->hdev->stat.byte_rx += len;
- hci_skb_pkt_type(skb) = hdr[3];
-
- err = hci_recv_frame(data->hdev, skb);
- if (err < 0)
- return err;
+ switch (hdr[3]) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
+ hci_skb_pkt_type(skb) = hdr[3];
+ err = hci_recv_frame(data->hdev, skb);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ kfree_skb(skb);
+ return -EINVAL;
+ }
sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 70e385987d41..f5924f3e8b8d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -266,6 +266,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
@@ -552,9 +553,9 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
}
bt_dev_err(hdev, "Reset Realtek device via gpio");
- gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(200);
gpiod_set_value_cansleep(reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 0);
}
static inline void btusb_free_frags(struct btusb_data *data)
@@ -2602,7 +2603,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
* and being processed the events from there then.
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
- data->evt_skb = skb_clone(skb, GFP_KERNEL);
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb)
goto err_out;
}
@@ -2867,7 +2868,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- return err;
+ goto err_release_fw;
}
/* Wait a few moments for firmware activation done */
@@ -3832,6 +3833,10 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+
+ err = usb_autopm_get_interface(intf);
+ if (err < 0)
+ goto out_free_dev;
}
if (id->driver_info & BTUSB_AMP) {
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index d2a6a4afdbbb..b236cb11c0dc 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
@@ -48,6 +49,15 @@
#define BCM_NUM_SUPPLIES 2
/**
+ * struct bcm_device_data - device specific data
+ * @no_early_set_baudrate: don't set the baudrate before setup()
+ * @drive_rts_on_open: drive the RTS line while opening the UART
+ */
+struct bcm_device_data {
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+};
+
+/**
* struct bcm_device - device driver resources
* @serdev_hu: HCI UART controller struct
* @list: bcm_device_list node
@@ -79,6 +89,7 @@
* @hu: pointer to HCI UART controller struct,
* used to disable flow control during runtime suspend and system sleep
* @is_suspended: whether flow control is currently disabled
+ * @no_early_set_baudrate: don't set the baudrate before setup()
+ * @drive_rts_on_open: drive the RTS line while opening the UART
+ * @pcm_int_params: PCM interface parameters; first byte is 0xff when unset
*/
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
@@ -112,6 +123,9 @@ struct bcm_device {
struct hci_uart *hu;
bool is_suspended;
#endif
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+ u8 pcm_int_params[5];
};
/* generic bcm uart resources */
@@ -445,11 +459,22 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
- hci_uart_set_flow_control(hu, true);
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, true);
+
hu->init_speed = bcm->dev->init_speed;
- hu->oper_speed = bcm->dev->oper_speed;
+
+ /* If oper_speed is set, ldisc/serdev will set the baudrate
+ * before calling setup()
+ */
+ if (!bcm->dev->no_early_set_baudrate)
+ hu->oper_speed = bcm->dev->oper_speed;
+
err = bcm_gpio_set_power(bcm->dev, true);
- hci_uart_set_flow_control(hu, false);
+
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, false);
+
if (err)
goto err_unset_hu;
}
@@ -565,6 +590,8 @@ static int bcm_setup(struct hci_uart *hu)
/* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
+ else if (bcm->dev && bcm->dev->oper_speed)
+ speed = bcm->dev->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
else
@@ -576,6 +603,16 @@ static int bcm_setup(struct hci_uart *hu)
host_set_baudrate(hu, speed);
}
+ /* PCM parameters if provided */
+ if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) {
+ struct bcm_set_pcm_int_params params;
+
+ btbcm_read_pcm_int_params(hu->hdev, &params);
+
+ memcpy(&params, bcm->dev->pcm_int_params, 5);
+ btbcm_write_pcm_int_params(hu->hdev, &params);
+ }
+
finalize:
release_firmware(fw);
@@ -1113,6 +1150,10 @@ static int bcm_acpi_probe(struct bcm_device *dev)
static int bcm_of_probe(struct bcm_device *bdev)
{
device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
+ device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
+ bdev->pcm_int_params, 5);
+ bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
+
return 0;
}
@@ -1128,6 +1169,9 @@ static int bcm_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = platform_get_irq(pdev, 0);
+ /* Initialize routing field to an unused value */
+ dev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&pdev->dev)) {
ret = bcm_acpi_probe(dev);
if (ret)
@@ -1374,6 +1418,7 @@ static struct platform_driver bcm_driver = {
static int bcm_serdev_probe(struct serdev_device *serdev)
{
struct bcm_device *bcmdev;
+ const struct bcm_device_data *data;
int err;
bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
@@ -1387,6 +1432,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
bcmdev->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, bcmdev);
+ /* Initialize routing field to an unused value */
+ bcmdev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&serdev->dev))
err = bcm_acpi_probe(bcmdev);
else
@@ -1408,6 +1456,12 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
dev_err(&serdev->dev, "Failed to power down\n");
+ data = device_get_match_data(bcmdev->dev);
+ if (data) {
+ bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
+ bcmdev->drive_rts_on_open = data->drive_rts_on_open;
+ }
+
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
@@ -1419,12 +1473,21 @@ static void bcm_serdev_remove(struct serdev_device *serdev)
}
#ifdef CONFIG_OF
+static struct bcm_device_data bcm4354_device_data = {
+ .no_early_set_baudrate = true,
+};
+
+static struct bcm_device_data bcm43438_device_data = {
+ .drive_rts_on_open = true,
+};
+
static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm20702a1" },
+ { .compatible = "brcm,bcm4329-bt" },
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
- { .compatible = "brcm,bcm43438-bt" },
- { .compatible = "brcm,bcm43540-bt" },
+ { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
{ },
};
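
The pattern above attaches per-compatible quirk data to the OF match table; bcm_serdev_probe() retrieves it with device_get_match_data(). A sketch of wiring up a further, purely hypothetical variant:

static struct bcm_device_data bcm_example_device_data = {
	.no_early_set_baudrate = true,	/* defer baudrate change to setup() */
	.drive_rts_on_open = true,	/* toggle RTS while powering up */
};

/* added to bcm_bluetooth_of_match[]:
 *	{ .compatible = "brcm,bcm-example-bt", .data = &bcm_example_device_data },
 */
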
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 19ba52005009..6dc1fbeb564b 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -103,6 +103,7 @@ static const struct h4_recv_pkt h4_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
};
/* Recv data */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index dacf297baf59..0b14547482a7 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -385,6 +385,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);
/* Remove Three-wire header */
@@ -594,6 +595,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
skb_queue_tail(&h5->unrel, skb);
break;
@@ -636,6 +638,7 @@ static bool valid_packet_type(u8 type)
case HCI_ACLDATA_PKT:
case HCI_COMMAND_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
case HCI_3WIRE_LINK_PKT:
case HCI_3WIRE_ACK_PKT:
return true;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f10bdf8e1fc5..d6e0c99ee5eb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
@@ -46,6 +47,7 @@
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
+#define MEMDUMP_TIMEOUT_MS 8000
/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768
@@ -53,12 +55,24 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+/* max retry count when init fails */
+#define MAX_INIT_RETRIES 3
+
+/* Controller dump header */
+#define QCA_SSR_DUMP_HANDLE 0x0108
+#define QCA_DUMP_PACKET_SIZE 255
+#define QCA_LAST_SEQUENCE_NUM 0xFFFF
+#define QCA_CRASHBYTE_PACKET_LEN 1096
+#define QCA_MEMDUMP_BYTE 0xFB
+
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
+ QCA_MEMDUMP_COLLECTION,
};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -81,11 +95,40 @@ enum hci_ibs_clock_state_vote {
HCI_IBS_RX_VOTE_CLOCK_OFF,
};
+/* Controller memory dump states */
+enum qca_memdump_states {
+ QCA_MEMDUMP_IDLE,
+ QCA_MEMDUMP_COLLECTING,
+ QCA_MEMDUMP_COLLECTED,
+ QCA_MEMDUMP_TIMEOUT,
+};
+
+struct qca_memdump_data {
+ char *memdump_buf_head;
+ char *memdump_buf_tail;
+ u32 current_seq_no;
+ u32 received_dump;
+};
+
+struct qca_memdump_event_hdr {
+ __u8 evt;
+ __u8 plen;
+ __u16 opcode;
+ __u16 seq_no;
+ __u8 reserved;
+} __packed;
+
+struct qca_dump_size {
+ u32 dump_size;
+} __packed;
+
struct qca_data {
struct hci_uart *hu;
struct sk_buff *rx_skb;
struct sk_buff_head txq;
struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
+ struct sk_buff_head rx_memdump_q; /* Memdump wait queue */
spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
u8 rx_ibs_state; /* HCI_IBS receive side power state */
@@ -95,14 +138,18 @@ struct qca_data {
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
u32 wake_retrans;
+ struct timer_list memdump_timer;
struct workqueue_struct *workqueue;
struct work_struct ws_awake_rx;
struct work_struct ws_awake_device;
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
+ struct work_struct ctrl_memdump_evt;
+ struct qca_memdump_data *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
+ enum qca_memdump_states memdump_state;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -167,6 +214,7 @@ static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static void qca_controller_memdump(struct work_struct *work);
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
@@ -474,12 +522,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
hci_uart_tx_wakeup(hu);
}
+static void hci_memdump_timeout(struct timer_list *t)
+{
+ struct qca_data *qca = from_timer(qca, t, memdump_timer);
+ struct hci_uart *hu = qca->hu;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = qca_memdump->memdump_buf_tail;
+
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ /* Inject hw error event to reset the device and driver. */
+ hci_reset_dev(hu->hdev);
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+}
+
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
- int ret;
BT_DBG("hu %p qca_open", hu);
@@ -492,6 +556,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
+ skb_queue_head_init(&qca->rx_memdump_q);
spin_lock_init(&qca->hci_ibs_lock);
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
@@ -504,7 +569,7 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
-
+ INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
init_waitqueue_head(&qca->suspend_wait_q);
qca->hu = hu;
@@ -519,23 +584,10 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
if (hu->serdev) {
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qca_is_wcn399x(qcadev->btsoc_type)) {
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
- /* Controller needs time to bootup. */
- msleep(150);
- } else {
+ if (qca_is_wcn399x(qcadev->btsoc_type)) {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_regulator_enable(qcadev);
- if (ret) {
- destroy_workqueue(qca->workqueue);
- kfree_skb(qca->rx_skb);
- hu->priv = NULL;
- kfree(qca);
- return ret;
- }
}
}
@@ -544,6 +596,7 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
+ timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -613,7 +666,6 @@ static int qca_flush(struct hci_uart *hu)
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
BT_DBG("hu %p qca close", hu);
@@ -622,19 +674,14 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->tx_wait_q);
skb_queue_purge(&qca->txq);
+ skb_queue_purge(&qca->rx_memdump_q);
del_timer(&qca->tx_idle_timer);
del_timer(&qca->wake_retrans_timer);
+ del_timer(&qca->memdump_timer);
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
- if (hu->serdev) {
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type))
- qca_power_shutdown(hu);
- else
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
-
- }
+ qca_power_shutdown(hu);
kfree_skb(qca->rx_skb);
@@ -900,6 +947,125 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
+static void qca_controller_memdump(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ctrl_memdump_evt);
+ struct hci_uart *hu = qca->hu;
+ struct sk_buff *skb;
+ struct qca_memdump_event_hdr *cmd_hdr;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ struct qca_dump_size *dump;
+ char *memdump_buf;
+ char null_buff[QCA_DUMP_PACKET_SIZE] = { 0 };
+ u16 seq_no;
+ u32 dump_size;
+
+ while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+ if (!qca_memdump) {
+ qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
+ GFP_ATOMIC);
+ if (!qca_memdump)
+ return;
+
+ qca->qca_memdump = qca_memdump;
+ }
+
+ qca->memdump_state = QCA_MEMDUMP_COLLECTING;
+ cmd_hdr = (void *) skb->data;
+ seq_no = __le16_to_cpu(cmd_hdr->seq_no);
+ skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
+
+ if (!seq_no) {
+ /* This is the first frame of the memdump packet from
+ * the controller. Disable IBS so the dump is received
+ * without interruption; the controller typically needs
+ * about 8 seconds to send the complete dump, so start a
+ * timer to bound this asynchronous activity.
+ */
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ dump = (void *) skb->data;
+ dump_size = __le32_to_cpu(dump->dump_size);
+ if (!(dump_size)) {
+ bt_dev_err(hu->hdev, "Rx invalid memdump size");
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
+ dump_size);
+ mod_timer(&qca->memdump_timer, (jiffies +
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
+
+ skb_pull(skb, sizeof(dump_size));
+ memdump_buf = vmalloc(dump_size);
+ qca_memdump->memdump_buf_head = memdump_buf;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ }
+
+ memdump_buf = qca_memdump->memdump_buf_tail;
+
+ /* If sequence number 0 was missed, there is no point in
+ * accepting the remaining sequences.
+ */
+ if (!memdump_buf) {
+ bt_dev_err(hu->hdev, "QCA: Discarding other packets");
+ kfree(qca_memdump);
+ kfree_skb(skb);
+ qca->qca_memdump = NULL;
+ return;
+ }
+
+ /* Some packets from the controller may be missed. In
+ * that case, fill the gaps in the buffer with dummy
+ * (zeroed) packets.
+ */
+ while ((seq_no > qca_memdump->current_seq_no + 1) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
+ qca_memdump->current_seq_no);
+ memcpy(memdump_buf, null_buff, QCA_DUMP_PACKET_SIZE);
+ memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
+ qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
+ qca_memdump->current_seq_no++;
+ }
+
+ memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
+ memdump_buf = memdump_buf + skb->len;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ qca_memdump->current_seq_no = seq_no + 1;
+ qca_memdump->received_dump += skb->len;
+ qca->qca_memdump = qca_memdump;
+ kfree_skb(skb);
+ if (seq_no == QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
+ qca_memdump->received_dump);
+ memdump_buf = qca_memdump->memdump_buf_head;
+ dev_coredumpv(&hu->serdev->dev, memdump_buf,
+ qca_memdump->received_dump, GFP_KERNEL);
+ del_timer(&qca->memdump_timer);
+ kfree(qca->qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ }
+ }
+}
+
+static int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ skb_queue_tail(&qca->rx_memdump_q, skb);
+ queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
+
+ return 0;
+}
+
static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
@@ -925,6 +1091,14 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+ /* We receive the chip memory dump as a series of event packets
+ * with a dedicated handle, followed by a hardware error event.
+ * When such an event is received we store the dump before closing
+ * hci; the dump helps in triaging the issue.
+ */
+ if ((skb->data[0] == HCI_VENDOR_PKT) &&
+ (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
+ return qca_controller_memdump_event(hdev, skb);
return hci_recv_frame(hdev, skb);
}
@@ -1203,6 +1377,91 @@ error:
return ret;
}
+static int qca_send_crashbuffer(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
+ return -ENOMEM;
+ }
+
+ /* We forcefully crash the controller by sending the 0xfb byte
+ * 1024 times. Since some of these bytes might be lost, to be on
+ * the safe side we send 1096 bytes to the SoC.
+ */
+ memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
+ QCA_CRASHBYTE_PACKET_LEN);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ return 0;
+}
+
+static void qca_wait_for_dump_collection(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = NULL;
+
+ wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
+ TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
+
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
+ if (qca_memdump)
+ memdump_buf = qca_memdump->memdump_buf_tail;
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+ }
+}
+
+static void qca_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ /* If a hardware error event is received for anything other
+ * than the QCA SoC memory dump event, we need to crash the
+ * SoC ourselves and wait here for up to 8 seconds to collect
+ * the dump packets. This blocks the calling thread until the
+ * dump is collected.
+ */
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ qca_send_crashbuffer(hu);
+ qca_wait_for_dump_collection(hdev);
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+ /* Wait here until the memory dump is collected or the
+ * memdump timer expires.
+ */
+ bt_dev_info(hdev, "waiting for dump to complete");
+ qca_wait_for_dump_collection(hdev);
+ }
+}
+
+static void qca_cmd_timeout(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+ qca_send_crashbuffer(hu);
+ else
+ bt_dev_info(hdev, "Dump collection is in process");
+}
+
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1253,11 +1512,37 @@ static int qca_wcn3990_init(struct hci_uart *hu)
return 0;
}
+static int qca_power_on(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ struct qca_serdev *qcadev;
+ int ret = 0;
+
+ /* A non-serdev device is usually powered externally and
+ * needs no additional action from the driver to power on.
+ */
+ if (!hu->serdev)
+ return 0;
+
+ if (qca_is_wcn399x(soc_type)) {
+ ret = qca_wcn3990_init(hu);
+ } else {
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to bootup. */
+ msleep(150);
+ }
+
+ return ret;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
@@ -1275,24 +1560,21 @@ static int qca_setup(struct hci_uart *hu)
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- if (qca_is_wcn399x(soc_type)) {
- bt_dev_info(hdev, "setting up wcn3990");
+ bt_dev_info(hdev, "setting up %s",
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
- /* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
- * setup for every hci up.
- */
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+retry:
+ ret = qca_power_on(hdev);
+ if (ret)
+ return ret;
+
+ if (qca_is_wcn399x(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- hu->hdev->shutdown = qca_power_off;
- ret = qca_wcn3990_init(hu);
- if (ret)
- return ret;
ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
- bt_dev_info(hdev, "ROME setup");
qca_set_speed(hu, QCA_INIT_SPEED);
}
@@ -1320,6 +1602,8 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ hu->hdev->hw_error = qca_hw_error;
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
@@ -1329,6 +1613,20 @@ static int qca_setup(struct hci_uart *hu)
* patch/nvm-config is found, so run with original fw/config.
*/
ret = 0;
+ } else {
+ if (retries < MAX_INIT_RETRIES) {
+ qca_power_shutdown(hu);
+ if (hu->serdev) {
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hdev, "failed to open port");
+ return ret;
+ }
+ }
+ retries++;
+ goto retry;
+ }
}
/* Setup bdaddr */
@@ -1393,6 +1691,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
qcadev = serdev_device_get_drvdata(hu->serdev);
@@ -1405,20 +1704,36 @@ static void qca_power_shutdown(struct hci_uart *hu)
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
- host_set_baudrate(hu, 2400);
- qca_send_power_pulse(hu, false);
- qca_regulator_disable(qcadev);
+ hu->hdev->hw_error = NULL;
+ hu->hdev->cmd_timeout = NULL;
+
+ /* A non-serdev device is usually powered externally and
+ * needs no additional action from the driver to power down.
+ */
+ if (!hu->serdev)
+ return;
+
+ if (qca_is_wcn399x(soc_type)) {
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hu, false);
+ qca_regulator_disable(qcadev);
+ } else {
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ }
}
static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
- /* Perform pre shutdown command */
- qca_send_pre_shutdown_cmd(hdev);
-
- usleep_range(8000, 10000);
+ /* Stop sending shutdown command if soc crashes. */
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ qca_send_pre_shutdown_cmd(hdev);
+ usleep_range(8000, 10000);
+ }
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
qca_power_shutdown(hu);
return 0;
}
@@ -1493,6 +1808,7 @@ static int qca_init_regulators(struct qca_power *qca,
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ struct hci_dev *hdev;
const struct qca_vreg_data *data;
int err;
@@ -1501,7 +1817,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
- data = of_device_get_match_data(&serdev->dev);
+ data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
@@ -1518,7 +1834,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
data->num_vregs);
if (err) {
BT_ERR("Failed to init regulators:%d", err);
- goto out;
+ return err;
}
qcadev->bt_power->vregs_on = false;
@@ -1531,7 +1847,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
- goto out;
+ return err;
}
} else {
qcadev->btsoc_type = QCA_ROME;
@@ -1557,12 +1873,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
+ if (err) {
+ BT_ERR("Rome serdev registration failed");
clk_disable_unprepare(qcadev->susclk);
+ return err;
+ }
}
-out: return err;
+ hdev = qcadev->serdev_hu.hdev;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hdev->shutdown = qca_power_off;
+ return 0;
}
static void qca_serdev_remove(struct serdev_device *serdev)
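
For orientation, a sketch of the frame format the memdump handlers above assume, reconstructed from the parsing code itself:

/*
 * Memory dump frames arrive as HCI vendor event packets:
 *
 *   byte 0     evt      (HCI_VENDOR_PKT)
 *   byte 1     plen
 *   bytes 2-3  opcode   (big-endian handle; QCA_SSR_DUMP_HANDLE = 0x0108,
 *                        checked in qca_recv_event())
 *   bytes 4-5  seq_no   (little-endian; QCA_LAST_SEQUENCE_NUM = 0xFFFF
 *                        marks the final frame)
 *   byte 6     reserved
 *
 * The first frame (seq_no == 0) additionally starts its payload with a
 * little-endian u32 total dump size; the remaining bytes of every frame
 * are dump payload, copied into the vmalloc'ed buffer in sequence order.
 */
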
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6ab631101019..4e039d7a16f8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -143,6 +143,13 @@ struct h4_recv_pkt {
.lsize = 1, \
.maxlen = HCI_MAX_EVENT_SIZE
+#define H4_RECV_ISO \
+ .type = HCI_ISODATA_PKT, \
+ .hlen = HCI_ISO_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 2, \
+ .maxlen = HCI_MAX_FRAME_SIZE \
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
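
A sketch of how a UART protocol driver consumes the new descriptor; this mirrors the hci_h4.c hunk earlier, and the table name here is illustrative:

static const struct h4_recv_pkt example_recv_pkts[] = {
	{ H4_RECV_ACL,   .recv = hci_recv_frame },
	{ H4_RECV_SCO,   .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
	{ H4_RECV_ISO,   .recv = hci_recv_frame },
};

/* receive path:
 *	skb = h4_recv_buf(hdev, skb, buffer, count,
 *			  example_recv_pkts, ARRAY_SIZE(example_recv_pkts));
 */
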
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 65e41c1d760f..8ab26dec5f6e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -178,6 +178,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
if (!data->hdev) {
kfree_skb(skb);
return -ENODEV;
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index d9629fc13a15..6ae48ad80409 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev,
return -EBUSY;
}
- mc_portal_virt_addr = devm_ioremap_nocache(dev,
+ mc_portal_virt_addr = devm_ioremap(dev,
mc_portal_phys_addr,
mc_portal_size);
if (!mc_portal_virt_addr) {
dev_err(dev,
- "devm_ioremap_nocache failed for MC portal %pa\n",
+ "devm_ioremap failed for MC portal %pa\n",
&mc_portal_phys_addr);
return -ENXIO;
}
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f4d1597df0a2..ccb44fe790a7 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata)
return -EINVAL;
}
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
ddata->clocks = devm_kcalloc(ddata->dev,
ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
@@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i, error;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return 0;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index ab154a75acf0..9e84239f88d4 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table = (u32 __iomem *)table;
#else
- bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ bridge->gatt_table = ioremap(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6271ce250b3..66a62d17a3f5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void)
}
if (intel_private.ifp_resource.start)
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
dev_err(&intel_private.pcidev->dev,
"can't ioremap flush page - no chipset flushing\n");
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index eb108b3c619a..51121a4b82c7 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -204,7 +204,7 @@ static int __init applicom_init(void)
if (pci_enable_device(dev))
return -EIO;
- RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+ RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -259,7 +259,7 @@ static int __init applicom_init(void)
/* Now try the specified ISA cards */
for (i = 0; i < MAX_ISA_BOARD; i++) {
- RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+ RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8486c29d8324..914e293ba62b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 290c880266bf..9f205bd1acc0 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
return -EBUSY;
}
- intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+ intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
if (intel_rng_hw->mem == NULL)
return -EBUSY;
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 899ff25f4f28..32d9fe61a225 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 8c78aa090492..7be8067ac4e8 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev)
return -ENOENT;
- rng->control_status = devm_ioremap_nocache(&pdev->dev,
+ rng->control_status = devm_ioremap(&pdev->dev,
res_ports->start,
sizeof(u64));
if (!rng->control_status)
return -ENOENT;
- rng->result = devm_ioremap_nocache(&pdev->dev,
+ rng->result = devm_ioremap(&pdev->dev,
res_result->start,
sizeof(u64));
if (!rng->result)
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 82f9a6a814ae..e342daa73d1b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
MGSLPC_INFO *info = dev_to_port(dev);
unsigned long flags;
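
The added txqueue argument tells .ndo_tx_timeout handlers which transmit queue the watchdog saw stall, so multi-queue drivers can recover a single queue instead of the whole device. A hedged sketch of a handler on the new prototype (all names hypothetical):

    static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
    	netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
    	/* reset or restart the stalled queue here */
    }

    static const struct net_device_ops my_netdev_ops = {
    	.ndo_tx_timeout = my_tx_timeout,
    };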
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 3b53b3e5ec3e..d52bf4df0bca 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(timeouts);
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
&dev_attr_enabled.attr,
@@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = {
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
NULL,
};
-static const struct attribute_group tpm_dev_group = {
- .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
};
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
WARN_ON(chip->groups_cnt != 0);
- chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
}
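
With the split attribute groups every chip, TPM 1.2 and 2.0 alike, now carries tpm_version_major, so userspace can learn the family without issuing TPM commands. A small userspace sketch, assuming the conventional /sys/class/tpm/tpm0 path:

    #include <stdio.h>

    /* Sketch: print the TPM family for tpm0; the sysfs path may differ
     * per system. */
    int main(void)
    {
    	FILE *f = fopen("/sys/class/tpm/tpm0/tpm_version_major", "r");
    	int major;

    	if (!f)
    		return 1;
    	if (fscanf(f, "%d", &major) == 1)
    		printf("TPM %d.x\n", major);
    	fclose(f);
    	return 0;
    }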
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 6a11239ccde3..772258de2d1f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3426,11 +3426,17 @@ static int __clk_core_init(struct clk_core *core)
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
+ if (ret) {
+ clk_core_unprepare(core);
+ goto out;
+ }
}
clk_core_reparent_orphans_nolock();
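
The critical-clock path now checks the return values of clk_core_prepare() and clk_core_enable() and unprepares when the enable fails. The same prepare-then-enable-with-rollback shape applies to any clock consumer; a sketch assuming clk was obtained with clk_get():

    static int start_block_clock(struct clk *clk)
    {
    	int ret;

    	ret = clk_prepare(clk);		/* may sleep */
    	if (ret)
    		return ret;

    	ret = clk_enable(clk);		/* callable from atomic context */
    	if (ret)
    		clk_unprepare(clk);	/* roll back on failure */

    	return ret;
    }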
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index a60a1be937ad..b4a95cbbda98 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f7b370f3acef..f6ce888098be 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
@@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
@@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
@@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
@@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
@@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct clk_regmap *gcc_sdm845_clocks[] = {
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index fbc34beafc78..7b703f14e20b 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
void __iomem *ppr0, *pibc0;
u16 modes;
- ppr0 = ioremap_nocache(PPR0, 2);
- pibc0 = ioremap_nocache(PIBC0, 2);
+ ppr0 = ioremap(PPR0, 2);
+ pibc0 = ioremap(PIBC0, 2);
BUG_ON(!ppr0 || !pibc0);
iowrite16(4, pibc0); /* enable input buffer */
modes = ioread16(ppr0);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 3a991ca1ee36..c9e5a1fb6653 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_subcmus);
}
+ /*
+ * Keep top part of G3D clock path enabled permanently to ensure
+ * that the internal busses get their clock regardless of the
+ * main G3D clock enablement status.
+ */
+ clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
samsung_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 45a1ed3fe674..50f8d1bc7046 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -23,9 +23,9 @@
*/
static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
- "pll-periph0", "iosc" };
+ "iosc", "pll-periph0" };
static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
- { .index = 2, .shift = 0, .width = 5 },
+ { .index = 3, .shift = 0, .width = 5 },
};
static struct ccu_div ar100_clk = {
@@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div r_apb1_clk = {
- .div = _SUNXI_CCU_DIV(0, 2),
-
- .common = {
- .reg = 0x00c,
- .hw.init = CLK_HW_INIT("r-apb1",
- "r-ahb",
- &ccu_div_ops,
- 0),
- },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
static struct ccu_div r_apb2_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4646fdc61053..4c8c491b87c2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div apb0_clk = {
- .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
- .common = {
- .reg = 0x0c,
- .hw.init = CLK_HW_INIT_HW("apb0",
- &ahb0_clk.hw,
- &ccu_div_ops,
- 0),
- },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
/*
* Define the parent as an array that can be reused to save space
@@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
&ar100_clk.common,
- &a83t_apb0_clk.common,
+ &apb0_clk.common,
&apb0_pio_clk.common,
&apb0_ir_clk.common,
&apb0_timer_clk.common,
@@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
[CLK_AHB0] = &ahb0_clk.hw,
- [CLK_APB0] = &a83t_apb0_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
[CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
[CLK_APB0_IR] = &apb0_ir_clk.common.hw,
[CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
@@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
{
- /* Fix apb0 bus gate parents here */
- apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 897490800102..23bfe1d12f21 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = {
.reg = 0x1f0,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outa", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
@@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = {
.reg = 0x1f4,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outb", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 5c779eec454b..0e36ca3bf3d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_PLL_DDR1 + 1,
};
static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_I2S0 + 1,
};
static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
index b0160d305a67..108eeeedcbf7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
@@ -51,6 +51,4 @@
#define CLK_PLL_DDR1 74
-#define CLK_NUMBER (CLK_I2S0 + 1)
-
#endif /* _CCU_SUN8I_H3_H_ */
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index e6bd6d1ea012..f6cdce441cf7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
periph_banks = banks;
clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
- if (!clks)
+ if (!clks) {
kfree(periph_clk_enb_refcnt);
+ return NULL;
+ }
clk_num = num;
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f65e16c4f3c4..8d4c08b034bd 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
cinfo->iobase = of_iomap(node, 0);
cinfo->dev = &pdev->dev;
pm_runtime_enable(cinfo->dev);
- pm_runtime_irq_safe(cinfo->dev);
pm_runtime_get_sync(cinfo->dev);
atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5fdd76cb1768..cc909e465823 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -88,7 +88,7 @@ config ROCKCHIP_TIMER
select TIMER_OF
select CLKSRC_MMIO
help
- Enables the support for the rockchip timer driver.
+ Enables the support for the Rockchip timer driver.
config ARMADA_370_XP_TIMER
bool "Armada 370 and XP timer driver" if COMPILE_TEST
@@ -162,13 +162,13 @@ config NPCM7XX_TIMER
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
- While TIMER0 serves as clockevent and TIMER1 serves as clocksource.
+ where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
help
- Enables support for the cadence ttc driver.
+ Enables support for the Cadence TTC driver.
config ASM9260_TIMER
bool "ASM9260 timer driver" if COMPILE_TEST
@@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on HAS_IOMEM
help
- Use the always on PRCMU Timer as clocksource
+ Use the always on PRCMU Timer as clocksource.
config CLPS711X_TIMER
- bool "Cirrus logic timer driver" if COMPILE_TEST
+ bool "Cirrus Logic timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
Enables support for the Cirrus Logic PS711 timer.
@@ -205,11 +205,11 @@ config ATLAS7_TIMER
Enables support for the Atlas7 timer.
config MXS_TIMER
- bool "Mxs timer driver" if COMPILE_TEST
+ bool "MXS timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select STMP_DEVICE
help
- Enables support for the Mxs timer.
+ Enables support for the MXS timer.
config PRIMA2_TIMER
bool "Prima2 timer driver" if COMPILE_TEST
@@ -238,10 +238,10 @@ config KEYSTONE_TIMER
Enables support for the Keystone timer.
config INTEGRATOR_AP_TIMER
- bool "Integrator-ap timer driver" if COMPILE_TEST
+ bool "Integrator-AP timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
- Enables support for the Integrator-ap timer.
+ Enables support for the Integrator-AP timer.
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
@@ -283,8 +283,8 @@ config CLKSRC_NPS
select TIMER_OF if OF
help
NPS400 clocksource support.
- Got 64 bit counter with update rate up to 1000MHz.
- This counter is accessed via couple of 32 bit memory mapped registers.
+	  It has a 64-bit counter with an update rate of up to 1000 MHz.
+	  This counter is accessed via a couple of 32-bit memory-mapped registers.
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
@@ -305,14 +305,14 @@ config ARC_TIMERS
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
(ARC700 as well as ARC HS38).
- TIMER0 serves as clockevent while TIMER1 provides clocksource
+ TIMER0 serves as clockevent while TIMER1 provides clocksource.
config ARC_TIMERS_64BIT
bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
depends on ARC_TIMERS
select TIMER_OF
help
- This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+ This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP).
RTC is implemented inside the core, while GFRC sits outside the core in
ARConnect IP block. Driver automatically picks one of them for clocksource
as appropriate.
@@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER
select TIMER_OF if OF
depends on ARM
help
- This options enables support for the ARM global timer unit
+ This option enables support for the ARM global timer unit.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module" if COMPILE_TEST
@@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
depends on ARM_GLOBAL_TIMER
default y
help
- Use ARM global timer clock source as sched_clock
+ Use ARM global timer clock source as sched_clock.
config ARMV7M_SYSTICK
bool "Support for the ARMv7M system time" if COMPILE_TEST
select TIMER_OF if OF
select CLKSRC_MMIO
help
- This options enables support for the ARMv7M system timer unit
+ This option enables support for the ARMv7M system timer unit.
config ATMEL_PIT
bool "Atmel PIT support" if COMPILE_TEST
@@ -460,7 +460,7 @@ config VF_PIT_TIMER
bool
select CLKSRC_MMIO
help
- Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
config OXNAS_RPS_TIMER
bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
@@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER
This enables support for the Oxford Semiconductor OXNAS RPS timers.
config SYS_SUPPORTS_SH_CMT
- bool
+ bool
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
@@ -490,13 +490,13 @@ config SPRD_TIMER
Enables support for the Spreadtrum timer driver.
config SYS_SUPPORTS_SH_MTU2
- bool
+ bool
config SYS_SUPPORTS_SH_TMU
- bool
+ bool
config SYS_SUPPORTS_EM_STI
- bool
+ bool
config CLKSRC_JCORE_PIT
bool "J-Core PIT timer driver" if COMPILE_TEST
@@ -523,7 +523,7 @@ config SH_TIMER_MTU2
help
This enables build of a clockevent driver for the Multi-Function
Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
- This hardware comes with 16 bit-timer registers.
+ This hardware comes with 16-bit timer registers.
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
@@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL
select TIMER_OF
select CLKSRC_MMIO
help
- This enables the clocksource for Tango SoC
+ This enables the clocksource for Tango SoC.
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
@@ -591,24 +591,24 @@ config CLKSRC_PXA
platforms.
config H8300_TMR8
- bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 8 bits timer for the H8300 platform.
config H8300_TMR16
- bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 16 bits timer for the H8300 platform with the
- H83069 cpu.
+ H83069 CPU.
config H8300_TPU
- bool "Clocksource for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clocksource for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the clocksource for the H8300 platform with the
- H8S2678 cpu.
+ H8S2678 CPU.
config CLKSRC_IMX_GPT
bool "Clocksource using i.MX GPT" if COMPILE_TEST
@@ -666,8 +666,8 @@ config CSKY_MP_TIMER
help
Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP
system.
- csky,mptimer is not only used in SMP system, it also could be used
- single core system. It's not a mmio reg and it use mtcr/mfcr instruction.
+	  csky,mptimer is not only used in SMP systems, it can also be used in
+	  single core systems. It's not a mmio reg and it uses the mtcr/mfcr instructions.
config GX6605S_TIMER
bool "Gx6605s SOC system timer driver" if COMPILE_TEST
@@ -697,4 +697,14 @@ config INGENIC_TIMER
help
Support for the timer/counter unit of the Ingenic JZ SoCs.
+config MICROCHIP_PIT64B
+ bool "Microchip PIT64B support"
+ depends on OF || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  This option enables the Microchip PIT64B timer for Atmel-based
+	  systems. It supports oneshot and periodic modes as well as high
+	  resolution. It is used as a clocksource and a clockevent.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4dfe4225ece7..713686faa549 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
+obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 2b196cbfadb6..b235f446ee50 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- goto err_iounmap;
+ goto err_timer_free;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
return 0;
+err_timer_free:
+ kfree(timer);
+
err_iounmap:
iounmap(base);
return ret;
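
The new err_timer_free label keeps the unwind in reverse order of acquisition: the kzalloc'd timer is freed first, then the earlier ioremap is undone. A generic sketch of the idiom (names hypothetical):

    	t = kzalloc(sizeof(*t), GFP_KERNEL);
    	if (!t) {
    		ret = -ENOMEM;
    		goto err_iounmap;
    	}

    	ret = example_setup_irq(t);
    	if (ret)
    		goto err_free;

    	return 0;

    err_free:
    	kfree(t);	/* undo kzalloc */
    err_iounmap:
    	iounmap(base);	/* undo the earlier ioremap */
    	return ret;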
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 9039df4f90e2..ab190dffb1ed 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
- struct resource *res;
- int irq;
- int ret;
+ int irq, ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL)
@@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev)
return irq;
/* map memory, let base point to the STI instance */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(&pdev->dev, res);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
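
devm_platform_ioremap_resource() folds platform_get_resource() plus devm_ioremap_resource() into a single call; for resource index 0 the two forms below are equivalent:

    /* before */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* after */
    base = devm_platform_ioremap_resource(pdev, 0);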
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 74cb299f5089..a267fe31ef13 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -4,7 +4,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 MCT(Multi-Core Timer) support
+ * Exynos4 MCT(Multi-Core Timer) support
*/
#include <linux/interrupt.h>
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 287d8d58c21a..9d808d595ca8 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta,
{
u64 current_tick;
- current_tick = hyperv_cs->read(NULL);
+ current_tick = hv_read_reference_counter();
current_tick += delta;
hv_init_timer(0, current_tick);
return 0;
@@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
+ *
+ * The Hyper-V clocksource ratings of 250 are chosen to be below the
+ * TSC clocksource rating of 300. In configurations where Hyper-V offers
+ * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
+ * is available and preferred. With the higher rating, it will be the
+ * default. On older hardware and Hyper-V versions, the TSC is marked
+ * "unstable", so no TSC clocksource is created and the selected Hyper-V
+ * clocksource will be the default.
*/
-struct clocksource *hyperv_cs;
-EXPORT_SYMBOL_GPL(hyperv_cs);
+u64 (*hv_read_reference_counter)(void);
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
-static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
+static union {
+ struct ms_hyperv_tsc_page page;
+ u8 reserved[PAGE_SIZE];
+} tsc_pg __aligned(PAGE_SIZE);
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
- return &tsc_pg;
+ return &tsc_pg.page;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
+static u64 notrace read_hv_clock_tsc(void)
{
- u64 current_tick = hv_read_tsc_page(&tsc_pg);
+ u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
if (current_tick == U64_MAX)
hv_get_time_ref_count(current_tick);
@@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+{
+ return read_hv_clock_tsc();
+}
+
static u64 read_hv_sched_clock_tsc(void)
{
- return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_tsc() - hv_sched_clock_offset;
+}
+
+static void suspend_hv_clock_tsc(struct clocksource *arg)
+{
+ u64 tsc_msr;
+
+ /* Disable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= ~BIT_ULL(0);
+ hv_set_reference_tsc(tsc_msr);
+}
+
+static void resume_hv_clock_tsc(struct clocksource *arg)
+{
+ phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
+ u64 tsc_msr;
+
+ /* Re-enable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= GENMASK_ULL(11, 0);
+ tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
+ hv_set_reference_tsc(tsc_msr);
}
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
- .rating = 400,
- .read = read_hv_clock_tsc,
+ .rating = 250,
+ .read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.suspend = suspend_hv_clock_tsc,
+ .resume = resume_hv_clock_tsc,
};
-static u64 notrace read_hv_clock_msr(struct clocksource *arg)
+static u64 notrace read_hv_clock_msr(void)
{
u64 current_tick;
/*
@@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+{
+ return read_hv_clock_msr();
+}
+
static u64 read_hv_sched_clock_msr(void)
{
- return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_msr() - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 400,
- .read = read_hv_clock_msr,
+ .rating = 250,
+ .read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
- hyperv_cs = &hyperv_cs_tsc;
- phys_addr = virt_to_phys(&tsc_pg);
+ hv_read_reference_counter = read_hv_clock_tsc;
+ phys_addr = virt_to_phys(hv_get_tsc_page());
/*
* The Hyper-V TLFS specifies to preserve the value of reserved
@@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void)
hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_tsc);
return true;
@@ -411,10 +457,10 @@ void __init hv_init_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
return;
- hyperv_cs = &hyperv_cs_msr;
+ hv_read_reference_counter = read_hv_clock_msr;
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
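
Clocksource ratings drive default selection: the core picks the highest-rated registered source, so lowering Hyper-V's rating from 400 to 250 lets an invariant TSC (rating 300) win whenever it is usable. A hedged registration sketch showing where the rating lives (demo names hypothetical):

    static u64 demo_read(struct clocksource *cs)
    {
    	return 0;	/* would read the hardware counter here */
    }

    static struct clocksource demo_cs = {
    	.name	= "demo",
    	.rating	= 250,	/* deliberately below the TSC's 300 */
    	.read	= demo_read,
    	.mask	= CLOCKSOURCE_MASK(64),
    	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
    };

    /* clocksource_register_hz(&demo_cs, NSEC_PER_SEC / 100); */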
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 9cde50cb3220..12ac75f7571f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
return -ENXIO;
}
- cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+ cmt->mapbase = ioremap(mem->start, resource_size(mem));
if (cmt->mapbase == NULL) {
dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 64526e50d471..bfccb31e94ad 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
return -ENXIO;
}
- mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ mtu->mapbase = ioremap(res->start, resource_size(res));
if (mtu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index d49690d15536..d41df9ba3725 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
return -ENXIO;
}
- tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ tmu->mapbase = ioremap(res->start, resource_size(res));
if (tmu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 88fe2e9ba9a3..38858e141731 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -15,6 +15,8 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
/*
* This driver configures the 2 16/32-bit count-up timers as follows:
@@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
return 0;
}
-/**
- * ttc_timer_init - Initialize the timer
- *
- * Initializes the timer hardware and register the clock source and clock event
- * timers with Linux kernal timer framework
- */
-static int __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_probe(struct platform_device *pdev)
{
unsigned int irq;
void __iomem *timer_baseaddr;
@@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer)
static int initialized;
int clksel, ret;
u32 timer_width = 16;
+ struct device_node *timer = pdev->dev.of_node;
if (initialized)
return 0;
@@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer)
return 0;
}
-TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+static const struct of_device_id ttc_timer_of_match[] = {
+ {.compatible = "cdns,ttc"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
+
+static struct platform_driver ttc_timer_driver = {
+ .driver = {
+ .name = "cdns_ttc_timer",
+ .of_match_table = ttc_timer_of_match,
+ },
+};
+builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
new file mode 100644
index 000000000000..bd63d3484838
--- /dev/null
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 64-bit Periodic Interval Timer driver
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define MCHP_PIT64B_CR 0x00 /* Control Register */
+#define MCHP_PIT64B_CR_START BIT(0)
+#define MCHP_PIT64B_CR_SWRST BIT(8)
+
+#define MCHP_PIT64B_MR 0x04 /* Mode Register */
+#define MCHP_PIT64B_MR_CONT BIT(0)
+#define MCHP_PIT64B_MR_ONE_SHOT (0)
+#define MCHP_PIT64B_MR_SGCLK BIT(3)
+#define MCHP_PIT64B_MR_PRES GENMASK(11, 8)
+
+#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */
+
+#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */
+
+#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */
+#define MCHP_PIT64B_IER_PERIOD BIT(0)
+
+#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */
+
+#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */
+
+#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */
+
+#define MCHP_PIT64B_PRES_MAX 0x10
+#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
+#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
+#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
+#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */
+#define MCHP_PIT64B_DEF_CE_FREQ		32768		/* 32 kHz */
+
+#define MCHP_PIT64B_NAME "pit64b"
+
+/**
+ * struct mchp_pit64b_timer - PIT64B timer data structure
+ * @base: base address of PIT64B hardware block
+ * @pclk: PIT64B's peripheral clock
+ * @gclk: PIT64B's generic clock
+ * @mode: precomputed value for mode register
+ */
+struct mchp_pit64b_timer {
+ void __iomem *base;
+ struct clk *pclk;
+ struct clk *gclk;
+ u32 mode;
+};
+
+/**
+ * struct mchp_pit64b_clkevt - PIT64B clockevent data structure
+ * @timer: PIT64B timer
+ * @clkevt: clockevent
+ */
+struct mchp_pit64b_clkevt {
+ struct mchp_pit64b_timer timer;
+ struct clock_event_device clkevt;
+};
+
+#define to_mchp_pit64b_timer(x) \
+ ((struct mchp_pit64b_timer *)container_of(x,\
+ struct mchp_pit64b_clkevt, clkevt))
+
+/* Base address for clocksource timer. */
+static void __iomem *mchp_pit64b_cs_base;
+/* Default cycles for clockevent timer. */
+static u64 mchp_pit64b_ce_cycles;
+
+static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
+{
+ unsigned long flags;
+ u32 low, high;
+
+ raw_local_irq_save(flags);
+
+ /*
+	 * When using a 64-bit period, TLSB must be read first, followed by
+	 * the read of TMSB. This sequence yields an atomic read of the
+	 * 64-bit timer value regardless of the lapse of time between the
+	 * accesses.
+ */
+ low = readl_relaxed(base + MCHP_PIT64B_TLSBR);
+ high = readl_relaxed(base + MCHP_PIT64B_TMSBR);
+
+ raw_local_irq_restore(flags);
+
+ return (((u64)high << 32) | low);
+}
+
+static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
+ u64 cycles, u32 mode, u32 irqs)
+{
+ u32 low, high;
+
+ low = cycles & MCHP_PIT64B_LSBMASK;
+ high = cycles >> 32;
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR);
+ writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR);
+ writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR);
+ writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER);
+ writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
+}
+
+static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static u64 mchp_pit64b_sched_read_clk(void)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
+ struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer->gclk);
+ clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ clk_prepare_enable(timer->pclk);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_prepare_enable(timer->gclk);
+}
+
+static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
+{
+ struct mchp_pit64b_clkevt *irq_data = dev_id;
+
+ /* Need to clear the interrupt. */
+ readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR);
+
+ irq_data->clkevt.event_handler(&irq_data->clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
+ u32 max_rate)
+{
+ u32 tmp;
+
+ for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) {
+ tmp = clk_rate / (*pres + 1);
+ if (tmp <= max_rate)
+ break;
+ }
+
+	/* Use the biggest prescaler if we didn't match one. */
+ if (*pres == MCHP_PIT64B_PRES_MAX)
+ *pres = MCHP_PIT64B_PRES_MAX - 1;
+}
+
+/**
+ * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at
+ * runtime; this includes prescaler and SGCLK bit
+ *
+ * The PIT64B timer may be fed by gclk or pclk. When gclk is used, its rate
+ * has to be at least 3 times lower than pclk's rate. The pclk rate is fixed;
+ * the gclk rate can be changed via the clock APIs. The chosen clock (pclk or
+ * gclk) can be further divided by the internal PIT64B divider.
+ *
+ * This function first tries to use gclk by requesting the desired rate from
+ * the PMC and then using the internal PIT64B prescaler, if needed, to reach
+ * the requested rate. If pclk/gclk < 3 (a condition required by the PIT64B
+ * hardware) the function falls back on using pclk as the clock source for
+ * the PIT64B timer, choosing the highest prescaler if it cannot find one
+ * matching the requested frequency.
+ *
+ * The PIT64B block in relation to the PMC is shown below:
+ *
+ * PIT64B
+ * PMC +------------------------------------+
+ * +----+ | +-----+ |
+ * | |-->gclk -->|-->| | +---------+ +-----+ |
+ * | | | | MUX |--->| Divider |->|timer| |
+ * | |-->pclk -->|-->| | +---------+ +-----+ |
+ * +----+ | +-----+ |
+ * | ^ |
+ * | sel |
+ * +------------------------------------+
+ *
+ * Where:
+ * - gclk rate <= pclk rate/3
+ * - gclk rate could be requested from PMC
+ * - pclk rate is fixed (cannot be requested from PMC)
+ */
+static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
+ unsigned long max_rate)
+{
+ unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX;
+ long gclk_round = 0;
+ u32 pres, best_pres = 0;
+
+ pclk_rate = clk_get_rate(timer->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ timer->mode = 0;
+
+ /* Try using GCLK. */
+ gclk_round = clk_round_rate(timer->gclk, max_rate);
+ if (gclk_round < 0)
+ goto pclk;
+
+ if (pclk_rate / gclk_round < 3)
+ goto pclk;
+
+ mchp_pit64b_pres_compute(&pres, gclk_round, max_rate);
+ best_diff = abs(gclk_round / (pres + 1) - max_rate);
+ best_pres = pres;
+
+ if (!best_diff) {
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ goto done;
+ }
+
+pclk:
+ /* Check if requested rate could be obtained using PCLK. */
+ mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate);
+ diff = abs(pclk_rate / (pres + 1) - max_rate);
+
+ if (best_diff > diff) {
+ /* Use PCLK. */
+ best_pres = pres;
+ } else {
+ /* Use GCLK. */
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ clk_set_rate(timer->gclk, gclk_round);
+ }
+
+done:
+ timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres);
+
+ pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n",
+ timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres,
+ timer->mode & MCHP_PIT64B_MR_SGCLK ?
+ gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1));
+
+ return 0;
+}
+
+static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
+ u32 clk_rate)
+{
+ int ret;
+
+ mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+
+ mchp_pit64b_cs_base = timer->base;
+
+ ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
+ 210, 64, mchp_pit64b_clksrc_read);
+ if (ret) {
+ pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
+
+ /* Stop timer. */
+ writel_relaxed(MCHP_PIT64B_CR_SWRST,
+ timer->base + MCHP_PIT64B_CR);
+
+ return ret;
+ }
+
+ sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
+ u32 clk_rate, u32 irq)
+{
+ struct mchp_pit64b_clkevt *ce;
+ int ret;
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ);
+
+ ce->timer.base = timer->base;
+ ce->timer.pclk = timer->pclk;
+ ce->timer.gclk = timer->gclk;
+ ce->timer.mode = timer->mode;
+ ce->clkevt.name = MCHP_PIT64B_NAME;
+ ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+ ce->clkevt.rating = 150;
+ ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
+ ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
+ ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
+ ce->clkevt.suspend = mchp_pit64b_clkevt_suspend;
+ ce->clkevt.resume = mchp_pit64b_clkevt_resume;
+ ce->clkevt.cpumask = cpumask_of(0);
+ ce->clkevt.irq = irq;
+
+ ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER,
+ "pit64b_tick", ce);
+ if (ret) {
+ pr_debug("clkevt: Failed to setup PIT64B IRQ\n");
+ kfree(ce);
+ return ret;
+ }
+
+ clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
+ bool clkevt)
+{
+ u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
+ struct mchp_pit64b_timer timer;
+ unsigned long clk_rate;
+ u32 irq = 0;
+ int ret;
+
+ /* Parse DT node. */
+ timer.pclk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(timer.pclk))
+ return PTR_ERR(timer.pclk);
+
+ timer.gclk = of_clk_get_by_name(node, "gclk");
+ if (IS_ERR(timer.gclk))
+ return PTR_ERR(timer.gclk);
+
+ timer.base = of_iomap(node, 0);
+ if (!timer.base)
+ return -ENXIO;
+
+ if (clkevt) {
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto io_unmap;
+ }
+ }
+
+	/* Initialize mode (prescaler + SGCLK bit). To be used at runtime. */
+ ret = mchp_pit64b_init_mode(&timer, freq);
+ if (ret)
+ goto irq_unmap;
+
+ ret = clk_prepare_enable(timer.pclk);
+ if (ret)
+ goto irq_unmap;
+
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK) {
+ ret = clk_prepare_enable(timer.gclk);
+ if (ret)
+ goto pclk_unprepare;
+
+ clk_rate = clk_get_rate(timer.gclk);
+ } else {
+ clk_rate = clk_get_rate(timer.pclk);
+ }
+ clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
+
+ if (clkevt)
+ ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq);
+ else
+ ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
+
+ if (ret)
+ goto gclk_unprepare;
+
+ return 0;
+
+gclk_unprepare:
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer.gclk);
+pclk_unprepare:
+ clk_disable_unprepare(timer.pclk);
+irq_unmap:
+ irq_dispose_mapping(irq);
+io_unmap:
+ iounmap(timer.base);
+
+ return ret;
+}
+
+static int __init mchp_pit64b_dt_init(struct device_node *node)
+{
+ static int inits;
+
+ switch (inits++) {
+ case 0:
+ /* 1st request, register clockevent. */
+ return mchp_pit64b_dt_init_timer(node, true);
+ case 1:
+ /* 2nd request, register clocksource. */
+ return mchp_pit64b_dt_init_timer(node, false);
+ }
+
+	/* Any further instances are ignored. */
+ return -EINVAL;
+}
+
+TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init);
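
MCHP_PIT64B_PRES_TO_MODE() packs the chosen prescaler into MR bits 11:8, and mchp_pit64b_pres_compute() picks the smallest divider that brings the source clock at or below the target. A worked sketch with hypothetical rates:

    /*
     * pclk = 200 MHz, clocksource target = 5 MHz:
     *   200 MHz / (p + 1) <= 5 MHz first holds at p = 39, which is past
     *   MCHP_PIT64B_PRES_MAX (16), so the loop clamps to p = 15 and the
     *   timer runs at 200 MHz / 16 = 12.5 MHz off pclk.
     *
     * gclk rounded to 5 MHz instead:
     *   5 MHz / (0 + 1) == 5 MHz, so p = 0 and best_diff == 0, and the
     *   gclk path wins:
     *
     *   mode = MCHP_PIT64B_MR_SGCLK | MCHP_PIT64B_PRES_TO_MODE(0);
     */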
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 5394d9dbdfbc..269a994d6a99 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
struct omap_dm_timer *timer;
- struct resource *mem, *irq;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(!irq)) {
- dev_err(dev, "%s: no IRQ resource.\n", __func__);
- return -ENODEV;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!mem)) {
- dev_err(dev, "%s: no memory resource.\n", __func__);
- return -ENODEV;
- }
-
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
+ timer->irq = platform_get_irq(pdev, 0);
+ if (timer->irq < 0)
+ return timer->irq;
+
timer->fclk = ERR_PTR(-ENODEV);
- timer->io_base = devm_ioremap_resource(dev, mem);
+ timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (pdata)
timer->errata = pdata->timer_errata;
- timer->irq = irq->start;
timer->pdev = pdev;
pm_runtime_enable(dev);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d0fb13..4f86ce2db34f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
return brcm_avs_get_frequency(priv->base);
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d8da763adc5..a06777c35fc0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -39,7 +39,7 @@
static struct cppc_cpudata **all_cpu_data;
struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE +1];
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
@@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision)
+ wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
+ break;
+ }
}
+
+ acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
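
acpi_get_table() hands back the table with a mapping reference held, so every successful get needs a matching acpi_put_table() once the data has been read — that missing put is what the hunk above adds. A balanced-access sketch (ACPI_SIG_PCCT chosen purely as an example signature):

    struct acpi_table_header *tbl;
    acpi_status status;

    status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
    if (ACPI_FAILURE(status))
    	return;

    /* ... inspect tbl->oem_id, tbl->oem_table_id, tbl->oem_revision ... */

    acpi_put_table(tbl);	/* drop the mapping reference */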
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index aba591d57c67..f2ae9cd455c1 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
{ .compatible = "marvell,armadaxp", },
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 85a6efd6b68f..6cb8193421ea 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
@@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d2fa3e9ccd97..ad6a17cf0011 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -172,7 +172,7 @@ struct vid_data {
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
- * @turbo_disabled: Whethet or not turbo P-states are available at all,
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index cb74bdc5baaa..70ad8fe1d78b 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct resource *res;
int err;
priv.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv.base = devm_ioremap_resource(&pdev->dev, res);
+ priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e9caa9586982..909f40fbcde2 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void)
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG(0);
- LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+ cpu_freq = readl(LOONGSON_CHIPCFG);
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+ /* Restore CPU state */
+ writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index fdc767fdbe6a..89d4fa8b65e9 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void)
goto out_free;
}
- pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+ pcch_virt_addr = ioremap(mem_resource->minimum,
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 106910351c41..5c221bc90210 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
int ret;
+ struct cpufreq_policy *policy;
mutex_lock(&cpufreq_lock);
@@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
*/
if (s3c_freq->is_dvs) {
pr_debug("cpufreq: leave dvs on reboot\n");
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+ cpufreq_cpu_put(policy);
+
if (ret < 0)
return NOTIFY_BAD;
}
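
cpufreq_cpu_get() returns the CPU's policy with a reference held, or NULL, so callers must check the result and balance it with cpufreq_cpu_put(); this fix and the s5pv210 one below follow that shape. A sketch (TARGET_FREQ is a placeholder):

    struct cpufreq_policy *policy;
    int ret;

    policy = cpufreq_cpu_get(0);	/* takes a reference */
    if (!policy)
    	return NOTIFY_BAD;		/* no policy for CPU0 */

    ret = cpufreq_driver_target(policy, TARGET_FREQ, 0);
    cpufreq_cpu_put(policy);		/* balance the get */

    return ret < 0 ? NOTIFY_BAD : NOTIFY_DONE;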
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5d10030f2560..e84281e2561d 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int ret;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ cpufreq_cpu_put(policy);
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index bcecb068b51b..2e233ad72758 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- struct resource *res;
unsigned int i = 0, err;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index a224d33dda7f..62272ecfa771 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+ depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
depends on MCPM && !ARM64
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
@@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD && !ARM64
+ depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
config ARM_ZYNQ_CPUIDLE
bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ && !ARM64
+ depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Xilinx Zynq processors.
@@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
- depends on ARCH_AT91 && !ARM64
+ depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
- depends on ARCH_EXYNOS && !ARM64
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
- depends on ARCH_MVEBU && !ARM64
+ depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index b607278df25b..04003b90dc49 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -89,6 +89,7 @@
* @coupled_cpus: mask of cpus that are part of the coupled set
* @requested_state: array of requested states for cpus in the coupled set
* @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @abort_barrier: synchronisation point for abort cases
* @online_count: count of cpus that are online
* @refcnt: reference count of cpuidle devices that are using this struct
* @prevent: flag to prevent coupled idle while a cpu is hotplugging
@@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu)
/**
* cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
- * @dev: struct cpuidle_device for this cpu
+ * @this_cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Calls cpuidle_coupled_poke on all other online cpus.
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
/**
* cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
* @next_state: the index in drv->states of the requested state for this cpu
*
@@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu,
/**
* cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Removes the requested idle state for the specified cpuidle device.
@@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
/**
* cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
- * @cpu - this cpu
+ * @cpu: this cpu
*
* Turns on interrupts and spins until any outstanding poke interrupts have
* been processed and the poke bit has been cleared.
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
index 6e36740f5719..fc22c59b6c73 100644
--- a/drivers/cpuidle/cpuidle-clps711x.c
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = {
static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+ clps711x_halt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clps711x_halt))
return PTR_ERR(clps711x_halt);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index d23d8f468c12..511c4f46027a 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = {
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+ ddr_operation_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddr_operation_base))
return PTR_ERR(ddr_operation_base);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 33d19c8eb027..de81298051b3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns)
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
+ *
+ * Return: the index of the deepest available idle state.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
@@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
if (!try_module_get(drv->owner))
return -EINVAL;
- for (i = 0; i < drv->state_count; i++)
+ for (i = 0; i < drv->state_count; i++) {
if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
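[With this change a driver can register a state that is available but disabled by default; the core converts CPUIDLE_FLAG_OFF into CPUIDLE_STATE_DISABLED_BY_USER so sysfs can re-enable it later. A hypothetical state table illustrating the flag (names and latencies are illustrative, and the .enter callbacks are omitted):]

	#include <linux/cpuidle.h>

	static struct cpuidle_driver example_idle_driver = {
		.name = "example_idle",
		.states = {
			{
				.name = "WFI",
				.desc = "shallow idle",
				.exit_latency = 1,
				.target_residency = 1,
			},
			{
				.name = "DEEP",
				.desc = "deep idle, off by default",
				.flags = CPUIDLE_FLAG_OFF,	/* start disabled */
				.exit_latency = 300,
				.target_residency = 1000,
			},
		},
		.state_count = 2,
	};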
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index ce6a5f80fb83..4070e573bf43 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
- drv->refcnt = 0;
-
/*
* Use all possible CPUs as the default, because if the kernel boots
* with some CPUs offline and then we online one of them, the CPU
@@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (WARN_ON(drv->refcnt > 0))
- return;
-
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
@@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
- */
-struct cpuidle_driver *cpuidle_driver_ref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv)
- drv->refcnt++;
-
- spin_unlock(&cpuidle_driver_lock);
- return drv;
-}
-
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv && !WARN_ON(drv->refcnt <= 0))
- drv->refcnt--;
-
- spin_unlock(&cpuidle_driver_lock);
-}
-
-/**
* cpuidle_driver_state_disabled - Disable or enable an idle state
* @drv: cpuidle driver owning the state
* @idx: State index
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index de7e706efd46..6deaaf5f05b5 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* pattern detection.
*/
cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
- if (cpu_data->interval_idx > INTERVALS)
+ if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
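[The one-character fix above matters: with `>` the index could reach INTERVALS without wrapping, so the next iteration wrote one element past the end of the array. The invariant for an N-slot ring buffer is to wrap as soon as the index reaches N; a minimal sketch:]

	#define N 8

	static unsigned int idx;
	static u64 buf[N];	/* valid indices are 0..N-1 */

	static void record(u64 value)
	{
		buf[idx++] = value;
		if (idx >= N)	/* '>' here would allow idx == N and an OOB write */
			idx = 0;
	}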
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 38ef770be90d..cdeedbf02646 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = {
/**
* cpuidle_add_interface - add CPU global sysfs attributes
+ * @dev: the target device
*/
int cpuidle_add_interface(struct device *dev)
{
@@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev)
/**
* cpuidle_remove_interface - remove CPU global sysfs attributes
+ * @dev: the target device
*/
void cpuidle_remove_interface(struct device *dev)
{
@@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state,
return size;
}
+static ssize_t show_state_default_status(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
+}
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
@@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
define_one_state_ro(above, show_state_above);
define_one_state_ro(below, show_state_below);
+define_one_state_ro(default_status, show_state_default_status);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_disable.attr,
&attr_above.attr,
&attr_below.attr,
+ &attr_default_status.attr,
NULL
};
@@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = {
/**
* cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
@@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
/**
* cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 91eb768d4221..c2767ed54dfe 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ depends on SPARC64
+ help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
@@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
- you want to use the OMAP modules for any of the crypto algorithms.
+ you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
@@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_EXYNOS_RNG
- tristate "EXYNOS HW pseudo random number generator support"
+ tristate "Exynos HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
@@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_SKCIPHER
- help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
+ help
+ This option allows you to choose whether to build support for all algorithms
+ (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+ IPsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
@@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
- module will be called qcom-rng. If unsure, say N.
+ module will be called qcom-rng. If unsure, say N.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on OF || PCI || COMPILE_TEST
+ depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index cb2b0874f68f..7f22d305178e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
op->keylen = keylen;
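[This hunk, and the matching ones in the other drivers below, drop CRYPTO_TFM_RES_BAD_KEY_LEN: a plain -EINVAL from setkey is now sufficient, and the per-tfm result flags no longer need to be set. A sketch of the resulting convention (function name is illustrative):]

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>

	/* Reject a bad key length with -EINVAL alone, without touching
	 * the tfm result flags. */
	static int example_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
				      unsigned int keylen)
	{
		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			return -EINVAL;

		/* ... store the key in the tfm context ... */
		return 0;
	}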
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 814cd12149a9..a2b67f7f8a81 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
@@ -22,6 +23,14 @@
#include "sun4i-ss.h"
+static const struct ss_variant ss_a10_variant = {
+ .sha1_in_be = false,
+};
+
+static const struct ss_variant ss_a33_variant = {
+ .sha1_in_be = true,
+};
+
static struct sun4i_ss_alg_template ss_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.mode = SS_OP_MD5,
@@ -273,7 +282,7 @@ err_enable:
return err;
}
-const struct dev_pm_ops sun4i_ss_pm_ops = {
+static const struct dev_pm_ops sun4i_ss_pm_ops = {
SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
};
@@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
return PTR_ERR(ss->base);
}
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Security System variant\n");
+ return -EINVAL;
+ }
+
ss->ssclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(ss->ssclk)) {
err = PTR_ERR(ss->ssclk);
@@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
- { .compatible = "allwinner,sun4i-a10-crypto" },
+ { .compatible = "allwinner,sun4i-a10-crypto",
+ .data = &ss_a10_variant
+ },
+ { .compatible = "allwinner,sun8i-a33-crypto",
+ .data = &ss_a33_variant
+ },
{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
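[The variant mechanism added here is the usual OF match-data pattern: each compatible string carries a pointer to a per-SoC description, and probe() retrieves it with of_device_get_match_data(). A condensed sketch of the round trip, reusing the ss_variant names from this patch:]

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct ss_variant { bool sha1_in_be; };

	static const struct ss_variant ss_a33_variant = {
		.sha1_in_be = true,
	};

	static const struct of_device_id match_table[] = {
		{ .compatible = "allwinner,sun8i-a33-crypto", .data = &ss_a33_variant },
		{ /* sentinel */ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct ss_variant *variant;

		/* the .data pointer set in the match table comes back here */
		variant = of_device_get_match_data(&pdev->dev);
		if (!variant)
			return -EINVAL;
		/* ... use variant->sha1_in_be when reading back digests ... */
		return 0;
	}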
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fdc0e6cdbb85..dc35edd90034 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -479,7 +479,10 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ if (ss->variant->sha1_in_be)
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ else
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
} else {
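[The branch added above selects how each 32-bit SHA1 word read from the SS_MD registers is byte-swapped before being copied into the result. On a little-endian CPU, cpu_to_be32() swaps bytes while cpu_to_le32() is a no-op, which is the entire difference between the two hardware variants; a hedged sketch of one word's conversion:]

	#include <asm/byteorder.h>

	/* Convert one digest word read from the engine into the byte
	 * order expected in the SHA1 result buffer; the casts flatten
	 * the __le32/__be32 annotations for this sketch. */
	static inline u32 digest_word(u32 raw, bool sha1_in_be)
	{
		return sha1_in_be ? (u32)cpu_to_le32(raw) : (u32)cpu_to_be32(raw);
	}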
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 60425ac75d90..2b4c6333eb67 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -131,7 +131,16 @@
#define SS_SEED_LEN 192
#define SS_DATA_LEN 160
+/*
+ * struct ss_variant - Describe SS hardware variant
+ * @sha1_in_be: the SHA1 digest is given by the SS in BE and so needs to be inverted.
+ */
+struct ss_variant {
+ bool sha1_in_be;
+};
+
struct sun4i_ss_ctx {
+ const struct ss_variant *variant;
void __iomem *base;
int irq;
struct clk *busclk;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 37d0b6c386a0..a5fd8975f3d3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
- chan->op_dir = rctx->op_dir;
- chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
- chan->keylen = op->keylen;
-
addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
cet->t_key = cpu_to_le32(addr_key);
if (dma_mapping_error(ce->dev, addr_key)) {
@@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 73a7649f915d..f72346a44e69 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return -EINVAL;
}
- ce->base = devm_platform_ioremap_resource(pdev, 0);;
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
@@ -624,7 +624,7 @@ error_alg:
error_irq:
sun8i_ce_pm_exit(ce);
error_pm:
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
}
@@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev)
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
return 0;
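[The MAXFLOW → MAXFLOW - 1 change here and in the other Allwinner/Amlogic drivers suggests the free helper takes the index of the last channel to release, inclusive, rather than a count. A sketch of that inclusive-bound convention (the struct and loop body are assumptions, not the driver's actual code):]

	#include <linux/slab.h>

	struct example_dev { void *chanlist[8]; };

	/* 'i' is the last valid index, inclusive; counting down to 0
	 * releases exactly i + 1 flows. Passing a count (MAXFLOW) would
	 * touch one slot past the end, hence MAXFLOW - 1 above. */
	static void example_free_chanlist(struct example_dev *dev, int i)
	{
		for (; i >= 0; i--)
			kfree(dev->chanlist[i]);
	}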
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 43db49ceafe4..8f8404c84a4d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -131,12 +131,8 @@ struct ce_task {
* @engine: ptr to the crypto_engine for this flow
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
- * @keylen: keylen for this flow operation
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
- * @method: current method for flow
- * @op_dir: direction (encrypt vs decrypt) of this flow
- * @op_mode: op_mode for this flow
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
* @stat_req: number of request done by this flow
@@ -145,12 +141,8 @@ struct sun8i_ce_flow {
struct crypto_engine *engine;
void *bounce_iv;
unsigned int ivlen;
- unsigned int keylen;
struct completion complete;
int status;
- u32 method;
- u32 op_dir;
- u32 op_mode;
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index f222979a5623..84d52fc3a2da 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
@@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 90997cc509b8..6b301afffd11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -595,7 +595,7 @@ error_alg:
error_irq:
sun8i_ss_pm_exit(ss);
error_pm:
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
return err;
}
@@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev)
debugfs_remove_recursive(ss->dbgfs_dir);
#endif
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a42f8619589d..f7fc0c464125 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
struct dynamic_sa_ctl *sa;
int rc;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
@@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
- crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_skcipher_set_flags(cipher,
- crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
- crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(cipher,
- crypto_aead_get_flags(ctx->sw_cipher.aead) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/**
@@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
struct dynamic_sa_ctl *sa;
int rc = 0;
- if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
- }
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 7d6b695c4ab3..981de43ea5e2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
- &dev->pdr_pa, GFP_ATOMIC);
+ &dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
@@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
- &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
@@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ &dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
@@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
- dma_free_coherent(dev->core_dev->device,
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
@@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- /* alloc memory for scatter descriptor ring */
- dev->sdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- &dev->sdr_pa, GFP_ATOMIC);
- if (!dev->sdr)
- return -ENOMEM;
-
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
- &dev->scatter_buffer_pa, GFP_ATOMIC);
+ &dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
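[Two independent cleanups meet in this hunk: the coherent allocations move from GFP_ATOMIC to GFP_KERNEL, which is safe because probe runs in process context and may sleep, and the scatter buffer is now allocated before the descriptor ring that points into it. A sketch of the sleeping allocation (names and sizes are placeholders):]

	#include <linux/dma-mapping.h>

	/* Probe-time coherent allocation: process context may sleep, so
	 * GFP_KERNEL is preferred over GFP_ATOMIC, which is reserved for
	 * contexts that cannot sleep and draws from scarcer reserves. */
	static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}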
@@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
+ goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
-
- rc = crypto4xx_build_sdr(core_dev->dev);
- if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
@@ -1493,7 +1493,6 @@ err_iomap:
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index b90850d18965..cf9547602670 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
+ depends on HAS_IOMEM
default y if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e589015aac1c..9819dd50fbad 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index fa05fce1c0de..9d4ead2f7ebb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev)
error_alg:
meson_unregister_algs(mc);
error_flow:
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
@@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_unregister_algs(mc);
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 91092504bc96..a6e14491e080 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
-#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"
@@ -89,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx {
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
+ u32 blocks;
};
struct atmel_aes_gcm_ctx {
@@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
}
}
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ int i;
+
+ /*
+ * The CTR transfer works in fragments of at most 1 MByte because of
+ * the 16-bit CTR counter embedded in the IP. On arrival here,
+ * ctx->blocks holds the number of blocks of the last fragment
+ * processed, so there is no need to explicitly cast it to u16.
+ */
+ for (i = 0; i < ctx->blocks; i++)
+ crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+
+ memcpy(req->iv, ctx->iv, ivsize);
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
@@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead)
- atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && !dd->ctx->is_aead &&
+ (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
+ if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ else
+ atmel_aes_ctr_update_req_iv(dd);
+ }
if (dd->is_async)
dd->areq->complete(dd->areq, err);
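[The new atmel_aes_ctr_update_req_iv() brings the driver in line with the skcipher API contract: on completion, req->iv must hold the IV for the next chunk, i.e. the original counter advanced by the number of blocks processed. crypto_inc() performs a big-endian increment over the buffer; a minimal sketch of the advance step:]

	#include <crypto/algapi.h>	/* crypto_inc() */
	#include <crypto/aes.h>

	/* Advance a 16-byte AES-CTR IV by 'blocks' block increments.
	 * crypto_inc() treats the buffer as one big-endian counter, so
	 * carries propagate across all 16 bytes. */
	static void ctr_advance_iv(u8 iv[AES_BLOCK_SIZE], u32 blocks)
	{
		while (blocks--)
			crypto_inc(iv, AES_BLOCK_SIZE);
	}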
@@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
int err;
memset(&config, 0, sizeof(config));
- config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
@@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
return 0;
}
-static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
- enum dma_transfer_direction dir)
-{
- struct atmel_aes_dma *dma;
-
- switch (dir) {
- case DMA_MEM_TO_DEV:
- dma = &dd->src;
- break;
-
- case DMA_DEV_TO_MEM:
- dma = &dd->dst;
- break;
-
- default:
- return;
- }
-
- dmaengine_terminate_all(dma->chan);
-}
-
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
struct scatterlist *src,
struct scatterlist *dst,
@@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
return -EINPROGRESS;
output_transfer_stop:
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+ dmaengine_terminate_sync(dd->dst.chan);
unmap:
atmel_aes_unmap(dd);
exit:
return atmel_aes_complete(dd, err);
}
-static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
-{
- atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
- atmel_aes_unmap(dd);
-}
-
static void atmel_aes_dma_callback(void *data)
{
struct atmel_aes_dev *dd = data;
- atmel_aes_dma_stop(dd);
+ atmel_aes_unmap(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
@@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
atmel_aes_transfer_complete);
}
-static inline struct atmel_aes_ctr_ctx *
-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct atmel_aes_ctr_ctx, base);
-}
-
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Compute data length. */
datalen = req->cryptlen - ctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+ ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + ctx->blocks - 1;
+
+ if (ctx->blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
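[The fragmentation arithmetic above deserves a worked example. With only 16 counter bits in the IP, a transfer whose counter would wrap must be split at the wrap point: if the low 16 bits start at `start`, at most 0x10000 - start blocks fit before overflow. A sketch of the check with a worked case (AES_BLOCK_SIZE is 16 bytes):]

	#include <crypto/aes.h>

	/* Example: start = 0xfffe, blocks = 5
	 *   end = start + blocks - 1 = 0x0002 (truncated: end < start)
	 *   => fragment to AES_BLOCK_SIZE * (0x10000 - 0xfffe) = 32 bytes,
	 *      then process the remaining 3 blocks as a new fragment. */
	static size_t ctr16_fragment_len(u16 start, u32 blocks, size_t datalen)
	{
		u16 end = start + blocks - 1;	/* wraps like the hardware counter */

		if (blocks >> 16 || end < start)
			return AES_BLOCK_SIZE * (0x10000 - start);	/* split here */
		return datalen;			/* fits in one fragment */
	}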
@@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "atmel-ecb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "atmel-cbc-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_ctr_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = {
static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 4:
- case 8:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return crypto_gcm_check_authsize(authsize);
}
static int atmel_aes_gcm_encrypt(struct aead_request *req)
@@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "atmel-gcm-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
};
@@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2113,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
- u32 flags;
int err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
goto badkey;
/* Save auth key. */
- flags = crypto_aead_get_flags(tfm);
err = atmel_sha_authenc_setkey(ctx->auth,
keys.authkey, keys.authkeylen,
- &flags);
- crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+ crypto_aead_get_flags(tfm));
if (err) {
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
};
@@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
free_page((unsigned long)dd->buf);
}
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- struct at_dma_slave *slave;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- slave = &pdata->dma_slave->rxdata;
- dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "tx");
- if (!dd->src.chan)
+ dd->src.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->src.chan)) {
+ ret = PTR_ERR(dd->src.chan);
goto err_dma_in;
+ }
- slave = &pdata->dma_slave->txdata;
- dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "rx");
- if (!dd->dst.chan)
+ dd->dst.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dst.chan)) {
+ ret = PTR_ERR(dd->dst.chan);
goto err_dma_out;
+ }
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
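[The DMA setup now goes through dma_request_chan(), which looks a channel up by device and name (from DT or ACPI) and returns an ERR_PTR on failure, so probe deferral propagates naturally instead of being flattened to -ENODEV. A condensed sketch of the two-channel grab with rollback:]

	#include <linux/dmaengine.h>

	/* Request "tx"/"rx" channels by name; dma_request_chan() returns
	 * an ERR_PTR (possibly -EPROBE_DEFER) that is passed straight up. */
	static int example_dma_init(struct device *dev,
				    struct dma_chan **tx, struct dma_chan **rx)
	{
		*tx = dma_request_chan(dev, "tx");
		if (IS_ERR(*tx))
			return PTR_ERR(*tx);

		*rx = dma_request_chan(dev, "rx");
		if (IS_ERR(*rx)) {
			dma_release_channel(*tx);
			return PTR_ERR(*rx);
		}
		return 0;
	}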
@@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
crypto_unregister_skcipher(&aes_algs[i]);
}
+static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
+{
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_alignmask = 0xf;
+ alg->cra_priority = ATMEL_AES_PRIORITY;
+ alg->cra_module = THIS_MODULE;
+}
+
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_algs[i].base);
+
err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
+ atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
+
err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
if (dd->caps.has_gcm) {
+ atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
+
err = crypto_register_aead(&aes_gcm_alg);
if (err)
goto err_aes_gcm_alg;
}
if (dd->caps.has_xts) {
+ atmel_aes_crypto_alg_init(&aes_xts_alg.base);
+
err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
@@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
+
err = crypto_register_aead(&aes_authenc_algs[i]);
if (err)
goto err_aes_authenc_alg;
@@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
@@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-
-static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave) {
- devm_kfree(&pdev->dev, pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- return pdata;
-}
-#else
-static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_aes_of_init(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto aes_dd_err;
- }
- }
-
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto aes_dd_err;
- }
-
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
- if (aes_dd == NULL) {
- err = -ENOMEM;
- goto aes_dd_err;
- }
+ if (!aes_dd)
+ return -ENOMEM;
aes_dd->dev = dev;
@@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd->irq = platform_get_irq(pdev, 0);
if (aes_dd->irq < 0) {
err = aes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
IRQF_SHARED, "atmel-aes", aes_dd);
if (err) {
dev_err(dev, "unable to request aes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_aes_buff;
+ goto err_iclk_unprepare;
- err = atmel_aes_dma_init(aes_dd, pdata);
+ err = atmel_aes_dma_init(aes_dd);
if (err)
- goto err_aes_dma;
+ goto err_buff_cleanup;
spin_lock(&atmel_aes.lock);
list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
@@ -2733,17 +2588,13 @@ err_algs:
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
-err_aes_dma:
+err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_aes_buff:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
-aes_dd_err:
- if (err != -EPROBE_DEFER)
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index d6de810df44f..c6530a1c8c20 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void);
struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags);
+ const u8 *key, unsigned int keylen, u32 flags);
int atmel_sha_authenc_schedule(struct ahash_request *req,
struct atmel_sha_authenc_ctx *auth,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8ea0e4bcde0d..e536e2a6bbd8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
+#define ATMEL_SHA_PRIORITY 300
+
/* SHA flags */
#define SHA_FLAGS_BUSY BIT(0)
#define SHA_FLAGS_FINAL BIT(1)
@@ -134,7 +136,6 @@ struct atmel_sha_dev {
void __iomem *io_base;
spinlock_t lock;
- int err;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
@@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
0, final);
}
-static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
ctx->block_size, DMA_TO_DEVICE);
}
-
- return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
@@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd->flags |= SHA_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
-static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
- atmel_sha_hw_init(dd);
+ int err;
+
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_sha_get_version(dd);
@@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);
+
+ return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void atmel_sha_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_cra_init;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->finup = atmel_sha_finup;
+ alg->digest = atmel_sha_digest;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
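[The refactoring pattern here, also applied to the AES and HMAC algs in this patch, is to hoist every field shared by all instances into one init helper run at registration time, leaving only the per-algorithm fields in the static tables. A minimal sketch of the pattern (names and priority are illustrative):]

	#include <crypto/internal/hash.h>

	/* Invariant fields are set once at registration; the array
	 * entries keep only what differs per algorithm. */
	static void example_alg_init(struct ahash_alg *alg)
	{
		alg->halg.base.cra_priority = 300;
		alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
		alg->halg.base.cra_module = THIS_MODULE;
	}

	static int example_register(struct ahash_alg *algs, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			example_alg_init(&algs[i]);
			err = crypto_register_ahash(&algs[i]);
			if (err)
				return err;
		}
		return 0;
	}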
static struct ahash_alg sha_1_256_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "atmel-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha1",
+ .halg.base.cra_driver_name = "atmel-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "atmel-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha256",
+ .halg.base.cra_driver_name = "atmel-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
};
static struct ahash_alg sha_224_alg = {
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "atmel-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha224",
+ .halg.base.cra_driver_name = "atmel-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
};
static struct ahash_alg sha_384_512_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "atmel-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha384",
+ .halg.base.cra_driver_name = "atmel-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "atmel-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha512",
+ .halg.base.cra_driver_name = "atmel-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd)
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
atmel_sha_update_dma_stop(dd);
- if (dd->err) {
- err = dd->err;
- goto finish;
- }
}
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
/* hash or semi-hash ready */
@@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data)
struct scatterlist *sg;
int nents;
- dmaengine_terminate_all(dma->chan);
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
@@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return 0;
+ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
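With the CRYPTO_TFM_RES_* flags removed from the crypto API, a ->setkey() implementation signals a bad key purely through its return value; no tfm flag is set on failure. A minimal sketch of the convention — example_setkey() and EXAMPLE_KEY_SIZE are hypothetical names, not part of this patch:

	static int example_setkey(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
	{
		/* Hypothetical length check; just return the error code. */
		if (keylen != EXAMPLE_KEY_SIZE)
			return -EINVAL;

		/* ... program the key ... */
		return 0;
	}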
static int atmel_sha_hmac_init(struct ahash_request *req)
@@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
atmel_sha_hmac_key_release(&hmac->hkey);
}
+static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
+ alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_hmac_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->digest = atmel_sha_hmac_digest;
+ alg->setkey = atmel_sha_hmac_setkey;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_hmac_algs[] = {
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "atmel-hmac-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha1)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "atmel-hmac-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha224)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "atmel-hmac-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha256)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "atmel-hmac-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha384)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "atmel-hmac-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha512)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags)
+ const u8 *key, unsigned int keylen, u32 flags)
{
struct crypto_ahash *tfm = auth->tfm;
- int err;
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(tfm, key, keylen);
- *flags = crypto_ahash_get_flags(tfm);
-
- return err;
+ crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_ahash_setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
@@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ atmel_sha_alg_init(&sha_1_256_algs[i]);
+
err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
goto err_sha_1_256_algs;
}
if (dd->caps.has_sha224) {
+ atmel_sha_alg_init(&sha_224_alg);
+
err = crypto_register_ahash(&sha_224_alg);
if (err)
goto err_sha_224_algs;
@@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ atmel_sha_alg_init(&sha_384_512_algs[i]);
+
err = crypto_register_ahash(&sha_384_512_algs[i]);
if (err)
goto err_sha_384_512_algs;
@@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_hmac) {
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+ atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
+
err = crypto_register_ahash(&sha_hmac_algs[i]);
if (err)
goto err_sha_hmac_algs;
@@ -2608,35 +2471,14 @@ err_sha_1_256_algs:
return err;
}
-static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ dev_err(dd->dev, "DMA channel is not available\n");
+ return PTR_ERR(dd->dma_lch_in.chan);
}
-}
-static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask_in;
-
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan) {
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
- }
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
SHA_REG_DIN(0);
dd->dma_lch_in.dma_conf.src_maxburst = 1;
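dma_request_chan() resolves the channel from the device's DT/ACPI binding and returns an ERR_PTR on failure — including -EPROBE_DEFER when the DMA controller has not probed yet — so the driver can propagate a meaningful error code instead of the old NULL check on the compat helper. A minimal sketch of the pattern:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */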
@@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-
-static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
int err;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
- if (sha_dd == NULL) {
- err = -ENOMEM;
- goto sha_dd_err;
- }
+ if (!sha_dd)
+ return -ENOMEM;
sha_dd->dev = dev;
@@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2780,14 +2591,14 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
err = sha_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
IRQF_SHARED, "atmel-sha", sha_dd);
if (err) {
dev_err(dev, "unable to request sha irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
- atmel_sha_hw_version_init(sha_dd);
+ err = atmel_sha_hw_version_init(sha_dd);
+ if (err)
+ goto err_iclk_unprepare;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_sha_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto iclk_unprepare;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto iclk_unprepare;
- }
- err = atmel_sha_dma_init(sha_dd, pdata);
+ err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_sha_dma;
+ goto err_iclk_unprepare;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2855,14 +2655,11 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_sha_dma:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
-sha_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 0c1f79b30fc1..ed40dbb98c6b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -30,31 +31,32 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
+#define ATMEL_TDES_PRIORITY 300
+
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x00ff
-#define TDES_FLAGS_ENCRYPT BIT(0)
-#define TDES_FLAGS_CBC BIT(1)
-#define TDES_FLAGS_CFB BIT(2)
-#define TDES_FLAGS_CFB8 BIT(3)
-#define TDES_FLAGS_CFB16 BIT(4)
-#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_CFB64 BIT(6)
-#define TDES_FLAGS_OFB BIT(7)
-
-#define TDES_FLAGS_INIT BIT(16)
-#define TDES_FLAGS_FAST BIT(17)
-#define TDES_FLAGS_BUSY BIT(18)
-#define TDES_FLAGS_DMA BIT(19)
+/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
+#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC
+#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
+#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
+#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
+#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
+#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
+#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
+#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
+#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
+
+#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
+
+#define TDES_FLAGS_INIT BIT(3)
+#define TDES_FLAGS_FAST BIT(4)
+#define TDES_FLAGS_BUSY BIT(5)
+#define TDES_FLAGS_DMA BIT(6)
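Encoding the driver flags directly as TDES Mode Register field values removes any translation step when programming the hardware: the mode bits are masked straight into TDES_MR, as the atmel_tdes_write_ctrl() hunk below shows. A condensed sketch:

	u32 valmr = TDES_MR_SMOD_PDC;

	/* Opmode, CFB size and cipher direction come straight from flags. */
	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
	atmel_tdes_write(dd, TDES_MR, valmr);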
#define ATMEL_TDES_QUEUE_LENGTH 50
@@ -100,7 +102,6 @@ struct atmel_tdes_dev {
int irq;
unsigned long flags;
- int err;
spinlock_t lock;
struct crypto_queue queue;
@@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
}
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
- u32 *value, int count)
+ const u32 *value, int count)
{
for (; count--; value++, offset += 4)
atmel_tdes_write(dd, offset, *value);
@@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
if (!(dd->flags & TDES_FLAGS_INIT)) {
atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd->flags |= TDES_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
-static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
- atmel_tdes_hw_init(dd);
+ int err;
+
+ err = atmel_tdes_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_tdes_get_version(dd);
@@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
+
+ return 0;
}
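Since atmel_tdes_hw_init() can now fail, the version probe returns that error for the caller to unwind rather than swallowing it; the probe-path hunks below adopt the matching pattern. Sketch of the call site:

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;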
static void atmel_tdes_dma_callback(void *data)
@@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data)
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
- u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+ u32 valmr = TDES_MR_SMOD_PDC;
err = atmel_tdes_hw_init(dd);
@@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_TDESMOD_DES;
}
- if (dd->flags & TDES_FLAGS_CBC) {
- valmr |= TDES_MR_OPMOD_CBC;
- } else if (dd->flags & TDES_FLAGS_CFB) {
- valmr |= TDES_MR_OPMOD_CFB;
-
- if (dd->flags & TDES_FLAGS_CFB8)
- valmr |= TDES_MR_CFBS_8b;
- else if (dd->flags & TDES_FLAGS_CFB16)
- valmr |= TDES_MR_CFBS_16b;
- else if (dd->flags & TDES_FLAGS_CFB32)
- valmr |= TDES_MR_CFBS_32b;
- else if (dd->flags & TDES_FLAGS_CFB64)
- valmr |= TDES_MR_CFBS_64b;
- } else if (dd->flags & TDES_FLAGS_OFB) {
- valmr |= TDES_MR_OPMOD_OFB;
- }
-
- if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
- valmr |= TDES_MR_CYPHER_ENC;
+ valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
- atmel_tdes_write(dd, TDES_CR, valcr);
atmel_tdes_write(dd, TDES_MR, valmr);
atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd->ctx->keylen >> 2);
- if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
- }
return 0;
}
@@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
- else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ break;
+
+ case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
- else
+ break;
+
+ default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
+ break;
+ }
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
+ enum dma_slave_buswidth addr_width;
dd->dma_size = length;
@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if (dd->flags & TDES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & TDES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- } else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
+ addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+
+ case TDES_FLAGS_CFB16:
+ addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+
+ default:
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
}
+ dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+ dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
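Both DMA channels must agree on the bus width, which now follows the CFB segment size from a single switch: one byte for CFB8, two bytes for CFB16, four bytes otherwise. A CFB16 request, for instance, ends up configuring:

	dd->dma_lch_in.dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dd->dma_lch_out.dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;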
@@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(
- crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd->total -= count;
if (dd->caps.has_dma)
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
else
- err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
- atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
req->base.complete(&req->base, err);
}
@@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
- if (mode & TDES_FLAGS_CFB8) {
+ switch (mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB16) {
+ break;
+
+ case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB32) {
+ break;
+
+ case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
- } else {
+ break;
+
+ default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
+ break;
}
rctx->mode = mode;
- if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
+ !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
return atmel_tdes_handle_queue(ctx->dd, req);
}
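With the opmode folded into the mode word, each entry point passes one composite value and atmel_tdes_crypt() validates the request length against the matching block size in a single switch. A sketch of a caller — example_cfb8_encrypt() is an illustrative name mirroring the atmel_tdes_cfb8_encrypt() handler rewritten below:

	static int example_cfb8_encrypt(struct skcipher_request *req)
	{
		/* One composite mode word: opmode | direction. */
		return atmel_tdes_crypt(req,
					TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
	}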
-static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan)
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ ret = PTR_ERR(dd->dma_lch_in.chan);
goto err_dma_in;
+ }
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
TDES_IDATA1R;
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
- dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
- if (!dd->dma_lch_out.chan)
+ dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_out.chan)) {
+ ret = PTR_ERR(dd->dma_lch_out.chan);
goto err_dma_out;
+ }
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
TDES_ODATA1R;
dd->dma_lch_out.dma_conf.src_maxburst = 1;
@@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, 0);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
@@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
@@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
+{
+ alg->base.cra_priority = ATMEL_TDES_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = atmel_tdes_init_tfm;
+}
+
static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "atmel-ecb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = atmel_des_setkey,
@@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "atmel-cbc-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x1,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x3,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "atmel-cbc-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data)
else
err = atmel_tdes_crypt_dma_stop(dd);
- err = dd->err ? : err;
-
if (dd->total && !err) {
if (dd->flags & TDES_FLAGS_FAST) {
dd->in_sg = sg_next(dd->in_sg);
@@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
+
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
@@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-
-static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
int err;
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
- if (tdes_dd == NULL) {
- err = -ENOMEM;
- goto tdes_dd_err;
- }
+ if (!tdes_dd)
+ return -ENOMEM;
tdes_dd->dev = dev;
@@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->irq = platform_get_irq(pdev, 0);
if (tdes_dd->irq < 0) {
err = tdes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
IRQF_SHARED, "atmel-tdes", tdes_dd);
if (err) {
dev_err(dev, "unable to request tdes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
- atmel_tdes_hw_version_init(tdes_dd);
+ err = atmel_tdes_hw_version_init(tdes_dd);
+ if (err)
+ goto err_tasklet_kill;
atmel_tdes_get_cap(tdes_dd);
err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_buff;
+ goto err_tasklet_kill;
if (tdes_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_tdes_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto err_pdata;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto err_pdata;
- }
- err = atmel_tdes_dma_init(tdes_dd, pdata);
+ err = atmel_tdes_dma_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_buff_cleanup;
dev_info(dev, "using %s, %s for DMA transfers\n",
dma_chan_name(tdes_dd->dma_lch_in.chan),
@@ -1359,15 +1260,11 @@ err_algs:
spin_unlock(&atmel_tdes.lock);
if (tdes_dd->caps.has_dma)
atmel_tdes_dma_cleanup(tdes_dd);
-err_tdes_dma:
-err_pdata:
+err_buff_cleanup:
atmel_tdes_buff_cleanup(tdes_dd);
-err_tdes_buff:
-res_err:
+err_tasklet_kill:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
-tdes_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 4b20606983a4..fcf1effc7661 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
- if (len != 16 && len != 24 && len != 32) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -1;
- }
+ if (len != 16 && len != 24 && len != 32)
+ return -EINVAL;
ctx->key_length = len;
@@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 32:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 64:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1564a6f8c9cb..c8b9408541a9 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2910,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2992,7 +2980,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 87053e46c788..fac5b2e26610 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
scatterlist crypto API to the SEC4 via job ring.
config CRYPTO_DEV_FSL_CAAM_PKC_API
- bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
+ bool "Register public key cryptography implementations with Crypto API"
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
config CRYPTO_DEV_FSL_CAAM_RNG_API
bool "Register caam device for hwrng API"
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2912006b946b..ef1a65f4fc92 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -619,7 +617,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8e3449670d2f..4a29e0ef9d63 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -268,7 +268,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
@@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
- goto badkey;
+ return -EINVAL;
}
ctx->cdata.keylen = keylen;
@@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 3443f6d6dd83..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
+ goto out;
err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
aead_setkey(aead, key, keylen);
@@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE)
return -EINVAL;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2998,15 +2972,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
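The ping-pong pair buf_0/buf_1 with its current_buf toggle collapses into a single buf plus buflen and next_buflen: instead of staging the tail of the request into an alternate buffer before submitting the job, the completion callbacks copy it out of req->src afterwards. Sketch of the completion-side staging, matching the ahash_done_bi() hunk below:

	/* Stage the unhashed tail of req->src for the next update. */
	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;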
struct caam_export_state {
@@ -3018,42 +2990,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
@@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
if (mapped_nents) {
sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
@@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
-
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
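
The hunks above replace the buf_0/buf_1 ping-pong pair (selected through
current_buf) with a single buf plus buflen/next_buflen, and defer copying the
residual input bytes until the completion callback. A minimal userspace model
of the new bookkeeping — a sketch only, assuming a 64-byte block size and
plain linear buffers in place of scatterlists:

	#include <string.h>

	#define BLOCKSIZE 64			/* e.g. SHA-256 block size */
	#define CAAM_MAX_HASH_BLOCK_SIZE 128

	struct hash_state {			/* mirrors the new caam_hash_state */
		unsigned char buf[CAAM_MAX_HASH_BLOCK_SIZE];
		int buflen;			/* bytes currently buffered */
		int next_buflen;		/* residue to keep after this job */
	};

	/* update step: everything block-aligned goes to the engine, the
	 * remainder is remembered in next_buflen (cf. ahash_update_ctx) */
	static int hash_update(struct hash_state *st, const unsigned char *src,
			       int nbytes)
	{
		int in_len = st->buflen + nbytes;
		int to_hash;

		st->next_buflen = in_len & (BLOCKSIZE - 1);
		to_hash = in_len - st->next_buflen;

		if (!to_hash) {			/* too little data: only buffer it */
			memcpy(st->buf + st->buflen, src, nbytes);
			st->buflen = st->next_buflen;
			return 0;
		}
		/* ... submit st->buf (st->buflen bytes) plus src to the engine;
		 * the residue is copied later, in the completion callback ... */
		return 1;
	}

	/* completion callback: copy the tail of src, as ahash_done_bi now does */
	static void hash_done(struct hash_state *st, const unsigned char *src,
			      int nbytes)
	{
		memcpy(st->buf, src + nbytes - st->next_buflen, st->next_buflen);
		st->buflen = st->next_buflen;
	}

With a single buffer, the DMA mapping in buf_map_to_qm_sg() and the
export/import paths no longer need to know which half of a pair is live.
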
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 65399cb2a770..8d9143407fc5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,15 +107,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -127,31 +125,6 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
static inline bool is_cmac_aes(u32 algtype)
{
return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
@@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map buf\n");
@@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
- if (keylen != AES_KEYSIZE_128) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
@@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -786,18 +770,16 @@ static int ahash_update_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
@@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req)
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
- if (*next_buflen) {
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- }
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
@@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
desc = edesc->hw_desc;
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
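
The ahash_export()/ahash_import() hunks in both files collapse to the same
shape once only one buffer exists. A sketch reusing struct hash_state from the
model above (context copy elided for brevity):

	struct export_state {			/* abbreviated caam_export_state */
		unsigned char buf[CAAM_MAX_HASH_BLOCK_SIZE];
		int buflen;
	};

	static void hash_export(const struct hash_state *st,
				struct export_state *out)
	{
		memcpy(out->buf, st->buf, st->buflen);	/* only the live bytes */
		out->buflen = st->buflen;
	}

	static void hash_import(struct hash_state *st,
				const struct export_state *in)
	{
		memset(st, 0, sizeof(*st));		/* clears next_buflen too */
		memcpy(st->buf, in->buf, in->buflen);
		st->buflen = in->buflen;
	}
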
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d7c3c3805693..7139366da016 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1 ||
/*
- * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq")) {
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
- { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
@@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev)
of_node_put(np);
if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
handle_imx6_err005766(&ctrl->mcr);
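
The soc table hunk widens the exact id "i.MX8MQ" to the glob "i.MX8M*":
soc_device_match() compares .soc_id entries with glob semantics, so the Mini,
Nano and Plus variants now pick up the same quirk data as the MQ. A sketch of
the lookup, with caam_imx_data() as an illustrative helper name only:

	#include <linux/sys_soc.h>

	static const void *caam_imx_data(void)
	{
		const struct soc_device_attribute *imx_soc;

		/* first match wins, so exact ids must precede the globs */
		imx_soc = soc_device_match(caam_imx_soc_table);

		return imx_soc ? imx_soc->data : NULL;
	}
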
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1ad66677d88e..1be1adffff1d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 6f80cc3b5c84..dce5423a5883 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
/* fill crypto context */
fctx = nctx->u.fctx;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 97af4d50d003..18088b0a2257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
@@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
fctx = nctx->u.fctx;
/* copy KEY2 */
@@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
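
The cavium hunks above, and the ccp-crypto-* hunks below, are instances of a
single tree-wide cleanup: setkey no longer reports CRYPTO_TFM_RES_BAD_KEY_LEN
through the tfm flags, it simply returns the error. The patched
nitrox_aes_setkey() reduces to the shape below (nitrox_aes_setkey_checked is
an illustrative name for the same body):

	static int nitrox_aes_setkey_checked(struct crypto_skcipher *cipher,
					     const u8 *key, unsigned int keylen)
	{
		int aes_keylen = flexi_aes_keylen(keylen);

		/* bad key length: bare error return, no *_set_flags() call */
		if (aes_keylen < 0)
			return -EINVAL;

		return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
	}
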
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 6b86f1e6d634..db362fe472ea 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o
ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
-ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
+ sev-dev.o \
+ tee-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 32f19f402073..5eba7ee49e81 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff50ee80d223..9e8f07c1afac 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 33328a153225..51e12fbd1159 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 453b9797f93f..474e6f1a6a84 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
key_len = digest_size;
} else {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0186b3df4c87..0d5576f6ad21 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 7ca2d3408e7a..e95e7aa5dbf1 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -2,57 +2,20 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/spinlock_types.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/hw_random.h>
-#include <linux/ccp.h>
-#include <linux/firmware.h>
-
-#include <asm/smp.h>
+#include <linux/irqreturn.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#include "sev-dev.h"
+#include "tee-dev.h"
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
-#define SEV_FW_NAME_SIZE 64
-
-static DEFINE_MUTEX(sev_cmd_mutex);
-static struct sev_misc_dev *misc_dev;
-static struct psp_device *psp_master;
-
-static int psp_cmd_timeout = 100;
-module_param(psp_cmd_timeout, int, 0644);
-MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
-
-static int psp_probe_timeout = 5;
-module_param(psp_probe_timeout, int, 0644);
-MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
-
-static bool psp_dead;
-static int psp_timeout;
-
-static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
-{
- if (psp_master->api_major > maj)
- return true;
- if (psp_master->api_major == maj && psp_master->api_minor >= min)
- return true;
- return false;
-}
+struct psp_device *psp_master;
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
@@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
{
struct psp_device *psp = data;
unsigned int status;
- int reg;
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
- /* Check if it is command completion: */
- if (!(status & PSP_CMD_COMPLETE))
- goto done;
+ /* invoke subdevice interrupt handlers */
+ if (status) {
+ if (psp->sev_irq_handler)
+ psp->sev_irq_handler(irq, psp->sev_irq_data, status);
- /* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
- psp->sev_int_rcvd = 1;
- wake_up(&psp->sev_int_queue);
+ if (psp->tee_irq_handler)
+ psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
-done:
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
-static int sev_wait_cmd_ioc(struct psp_device *psp,
- unsigned int *reg, unsigned int timeout)
-{
- int ret;
-
- ret = wait_event_timeout(psp->sev_int_queue,
- psp->sev_int_rcvd, timeout * HZ);
- if (!ret)
- return -ETIMEDOUT;
-
- *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
-
- return 0;
-}
-
-static int sev_cmd_buffer_len(int cmd)
-{
- switch (cmd) {
- case SEV_CMD_INIT: return sizeof(struct sev_data_init);
- case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
- case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
- case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
- case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
- case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
- case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
- case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
- case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
- case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
- case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
- case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
- case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
- case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
- case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
- case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
- case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
- case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
- case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
- case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
- case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
- case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
- case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
- case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
- case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
- default: return 0;
- }
-
- return 0;
-}
-
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
-{
- struct psp_device *psp = psp_master;
- unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp_dead)
- return -EBUSY;
-
- /* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
-
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
- cmd, phys_msb, phys_lsb, psp_timeout);
-
- print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
- iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
-
- psp->sev_int_rcvd = 0;
-
- reg = cmd;
- reg <<= PSP_CMDRESP_CMD_SHIFT;
- reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
-
- /* wait for command completion */
- ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
- if (ret) {
- if (psp_ret)
- *psp_ret = 0;
-
- dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
- psp_dead = true;
-
- return ret;
- }
-
- psp_timeout = psp_cmd_timeout;
-
- if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
-
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
- ret = -EIO;
- }
-
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- return ret;
-}
-
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_do_cmd_locked(cmd, data, psp_ret);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int __sev_platform_init_locked(int *error)
-{
- struct psp_device *psp = psp_master;
- int rc = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp->sev_state == SEV_STATE_INIT)
- return 0;
-
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
- if (rc)
- return rc;
-
- psp->sev_state = SEV_STATE_INIT;
-
- /* Prepare for first SEV guest launch after INIT */
- wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
- return rc;
-
- dev_dbg(psp->dev, "SEV firmware initialized\n");
-
- return rc;
-}
-
-int sev_platform_init(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(sev_platform_init);
-
-static int __sev_platform_shutdown_locked(int *error)
-{
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
- return ret;
-
- psp_master->sev_state = SEV_STATE_UNINIT;
- dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
-
- return ret;
-}
-
-static int sev_platform_shutdown(int *error)
+static unsigned int psp_get_capability(struct psp_device *psp)
{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int sev_get_platform_state(int *state, int *error)
-{
- int rc;
-
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &psp_master->status_cmd_buf, error);
- if (rc)
- return rc;
-
- *state = psp_master->status_cmd_buf.state;
- return rc;
-}
-
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
-{
- int state, rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
/*
- * The SEV spec requires that FACTORY_RESET must be issued in
- * UNINIT state. Before we go further lets check if any guest is
- * active.
- *
- * If FW is in WORKING state then deny the request otherwise issue
- * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
- *
- */
- rc = sev_get_platform_state(&state, &argp->error);
- if (rc)
- return rc;
-
- if (state == SEV_STATE_WORKING)
- return -EBUSY;
-
- if (state == SEV_STATE_INIT) {
- rc = __sev_platform_shutdown_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_status *data = &psp_master->status_cmd_buf;
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
- if (ret)
- return ret;
-
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
- ret = -EFAULT;
-
- return ret;
-}
-
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
-{
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
- void *blob = NULL;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* userspace wants to query CSR length */
- if (!input.address || !input.length)
- goto cmd;
-
- /* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
-
- blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->address = __psp_pa(blob);
- data->len = input.length;
-
-cmd:
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_blob;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
-
- /* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_blob;
- }
-
- if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
- ret = -EFAULT;
- }
-
-e_free_blob:
- kfree(blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
-{
- if (!uaddr || !len)
- return ERR_PTR(-EINVAL);
-
- /* verify that blob length does not exceed our limit */
- if (len > SEV_FW_BLOB_MAX_SIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
-}
-EXPORT_SYMBOL_GPL(psp_copy_user_blob);
-
-static int sev_get_api_version(void)
-{
- struct sev_user_data_status *status;
- int error = 0, ret;
-
- status = &psp_master->status_cmd_buf;
- ret = sev_platform_status(status, &error);
- if (ret) {
- dev_err(psp_master->dev,
- "SEV: failed to get status. Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
 * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
*/
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
}
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region.
- */
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
-
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
-
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
- if (ret)
- dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
-
- __free_pages(p, order);
-
-fw_err:
- release_firmware(firmware);
-
- return ret;
+ return val;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int psp_check_sev_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
- void *pek_blob, *oca_blob;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* copy PEK certificate blobs from userspace */
- pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
-
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
-
- /* copy PEK certificate blobs from userspace */
- oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
- if (IS_ERR(oca_blob)) {
- ret = PTR_ERR(oca_blob);
- goto e_free_pek;
- }
-
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
-
- /* If platform is not in INIT state then transition it to INIT */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_oca;
+ /* Check if device supports SEV feature */
+ if (!(capability & 1)) {
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
-
-e_free_oca:
- kfree(oca_blob);
-e_free_pek:
- kfree(pek_blob);
-e_free:
- kfree(data);
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+static int psp_check_tee_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
- void *id_blob = NULL;
- int ret;
-
- /* SEV GET_ID is available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->address = __psp_pa(id_blob);
- data->len = input.length;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
-
- /*
- * Firmware will return the length of the ID value (either the minimum
- * required length or the actual length written), return it to the user.
- */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free;
- }
-
- if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
- ret = -EFAULT;
- goto e_free;
- }
+ /* Check if device supports TEE feature */
+ if (!(capability & 2)) {
+ dev_dbg(psp->dev, "psp does not support TEE\n");
+ return -ENODEV;
}
-e_free:
- kfree(id_blob);
- kfree(data);
-
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+static int psp_check_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_data_get_id *data;
- u64 data_size, user_size;
- void *id_blob, *mem;
- int ret;
+ int sev_support = psp_check_sev_support(psp, capability);
+ int tee_support = psp_check_tee_support(psp, capability);
- /* SEV GET_ID available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- /* SEV FW expects the buffer it fills with the ID to be
- * 8-byte aligned. Memory allocated should be enough to
- * hold data structure + alignment padding + memory
- * where SEV FW writes the ID.
- */
- data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
- user_size = sizeof(struct sev_user_data_get_id);
-
- mem = kzalloc(data_size + user_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- data = mem;
- id_blob = mem + data_size;
-
- data->address = __psp_pa(id_blob);
- data->len = user_size;
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
- if (!ret) {
- if (copy_to_user((void __user *)argp->data, id_blob, data->len))
- ret = -EFAULT;
- }
-
- kfree(mem);
+ /* Return error if device neither supports SEV nor TEE */
+ if (sev_support && tee_support)
+ return -ENODEV;
- return ret;
+ return 0;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int psp_init(struct psp_device *psp, unsigned int capability)
{
- struct sev_user_data_pdh_cert_export input;
- void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
+ if (!psp_check_sev_support(psp, capability)) {
+ ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Userspace wants to query the certificate length. */
- if (!input.pdh_cert_address ||
- !input.pdh_cert_len ||
- !input.cert_chain_address)
- goto cmd;
-
- /* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- /* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
-
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
- if (!cert_blob) {
- ret = -ENOMEM;
- goto e_free_pdh;
- }
-
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
-
-cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
-
- /* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_cert;
- }
-
- if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
- pdh_blob, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free_cert;
- }
- }
-
- if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
- cert_blob, input.cert_chain_len))
- ret = -EFAULT;
- }
-
-e_free_cert:
- kfree(cert_blob);
-e_free_pdh:
- kfree(pdh_blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct sev_issue_cmd input;
- int ret = -EFAULT;
-
- if (!psp_master)
- return -ENODEV;
-
- if (ioctl != SEV_ISSUE_CMD)
- return -EINVAL;
-
- if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
- return -EFAULT;
-
- if (input.cmd > SEV_MAX)
- return -EINVAL;
-
- mutex_lock(&sev_cmd_mutex);
-
- switch (input.cmd) {
-
- case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
- break;
- case SEV_PLATFORM_STATUS:
- ret = sev_ioctl_do_platform_status(&input);
- break;
- case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
- break;
- case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
- break;
- case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
- break;
- case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
- break;
- case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
- break;
- case SEV_GET_ID:
- pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
- ret = sev_ioctl_do_get_id(&input);
- break;
- case SEV_GET_ID2:
- ret = sev_ioctl_do_get_id2(&input);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
- ret = -EFAULT;
-out:
- mutex_unlock(&sev_cmd_mutex);
-
- return ret;
-}
-
-static const struct file_operations sev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sev_ioctl,
-};
-
-int sev_platform_status(struct sev_user_data_status *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_platform_status);
-
-int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_deactivate);
-
-int sev_guest_activate(struct sev_data_activate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_activate);
-
-int sev_guest_decommission(struct sev_data_decommission *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_decommission);
-
-int sev_guest_df_flush(int *error)
-{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_df_flush);
-
-static void sev_exit(struct kref *ref)
-{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
- misc_deregister(&misc_dev->misc);
-}
-
-static int sev_misc_init(struct psp_device *psp)
-{
- struct device *dev = psp->dev;
- int ret;
-
- /*
- * SEV feature support can be detected on multiple devices but the SEV
- * FW commands must be issued on the master. During probe, we do not
- * know the master hence we create /dev/sev on the first device probe.
- * sev_do_cmd() finds the right master device to which to issue the
- * command to the firmware.
- */
- if (!misc_dev) {
- struct miscdevice *misc;
-
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
- if (!misc_dev)
- return -ENOMEM;
-
- misc = &misc_dev->misc;
- misc->minor = MISC_DYNAMIC_MINOR;
- misc->name = DEVICE_NAME;
- misc->fops = &sev_fops;
-
- ret = misc_register(misc);
+ if (!psp_check_tee_support(psp, capability)) {
+ ret = tee_dev_init(psp);
if (ret)
return ret;
-
- kref_init(&misc_dev->refcount);
- } else {
- kref_get(&misc_dev->refcount);
- }
-
- init_waitqueue_head(&psp->sev_int_queue);
- psp->sev_misc = misc_dev;
- dev_dbg(dev, "registered SEV device\n");
-
- return 0;
-}
-
-static int psp_check_sev_support(struct psp_device *psp)
-{
- unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
-
- /*
- * Check for a access to the registers. If this read returns
- * 0xffffffff, it's likely that the system is running a broken
- * BIOS which disallows access to the device. Stop here and
- * fail the PSP initialization (but not the load, as the CCP
- * could get properly initialized).
- */
- if (val == 0xffffffff) {
- dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return -ENODEV;
- }
-
- if (!(val & 1)) {
- /* Device does not support the SEV feature */
- dev_dbg(psp->dev, "psp does not support SEV\n");
- return -ENODEV;
}
return 0;
@@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
+ unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- ret = psp_check_sev_support(psp);
+ capability = psp_get_capability(psp);
+ if (!capability)
+ goto e_disable;
+
+ ret = psp_check_support(psp, capability);
if (ret)
goto e_disable;
@@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_misc_init(psp);
+ ret = psp_init(psp, capability);
if (ret)
goto e_irq;
@@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp)
if (!psp)
return;
- if (psp->sev_misc)
- kref_put(&misc_dev->refcount, sev_exit);
+ sev_dev_destroy(psp);
+
+ tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
}
-int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
- void *data, int *error)
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
{
- if (!filep || filep->f_op != &sev_fops)
- return -EBADF;
-
- return sev_do_cmd(cmd, data, error);
+ psp->sev_irq_data = data;
+ psp->sev_irq_handler = handler;
}
-EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
-void psp_pci_init(void)
+void psp_clear_sev_irq_handler(struct psp_device *psp)
{
- struct sp_device *sp;
- int error, rc;
-
- sp = sp_get_psp_master_device();
- if (!sp)
- return;
-
- psp_master = sp->psp_data;
-
- psp_timeout = psp_probe_timeout;
+ psp_set_sev_irq_handler(psp, NULL, NULL);
+}
- if (sev_get_api_version())
- goto err;
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
+{
+ psp->tee_irq_data = data;
+ psp->tee_irq_handler = handler;
+}
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
+void psp_clear_tee_irq_handler(struct psp_device *psp)
+{
+ psp_set_tee_irq_handler(psp, NULL, NULL);
+}
- if (psp_master->sev_state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- psp_master->sev_state = SEV_STATE_UNINIT;
- }
+struct psp_device *psp_get_master_device(void)
+{
+ struct sp_device *sp = sp_get_psp_master_device();
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(psp_master->dev) == 0)
- sev_get_api_version();
+ return sp ? sp->psp_data : NULL;
+}
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sp->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
+void psp_pci_init(void)
+{
+ psp_master = psp_get_master_device();
- if (rc) {
- dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_master)
return;
- }
-
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
- psp_master->api_minor, psp_master->build);
-
- return;
-err:
- psp_master = NULL;
+ sev_pci_init();
}
void psp_pci_exit(void)
@@ -1133,5 +263,5 @@ void psp_pci_exit(void)
if (!psp_master)
return;
- sev_platform_shutdown(NULL);
+ sev_pci_exit();
}
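
psp_get_capability() and the psp_check_*_support() helpers above decode the
feature register: bit 0 advertises SEV, bit 1 TEE, and an all-ones read means
a BIOS that blocks register access. A condensed sketch of the decision, with
PSP_CAP_SEV/PSP_CAP_TEE as illustrative names only (not driver macros):

	#include <linux/bits.h>

	#define PSP_CAP_SEV	BIT(0)		/* illustrative name */
	#define PSP_CAP_TEE	BIT(1)

	static int psp_capability_usable(unsigned int capability)
	{
		if (capability == 0xffffffff)
			return -ENODEV;	/* register access blocked by BIOS */

		if (!(capability & (PSP_CAP_SEV | PSP_CAP_TEE)))
			return -ENODEV;	/* neither subdevice is present */

		return 0;		/* at least one of SEV/TEE to init */
	}
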
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index dd516b35ba86..ef38e4135d81 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -11,35 +11,20 @@
#define __PSP_DEV_H__
#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/dmapool.h>
-#include <linux/hw_random.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/psp-sev.h>
-#include <linux/miscdevice.h>
-#include <linux/capability.h>
#include "sp-dev.h"
-#define PSP_CMD_COMPLETE BIT(1)
-
-#define PSP_CMDRESP_CMD_SHIFT 16
-#define PSP_CMDRESP_IOC BIT(0)
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
-struct sev_misc_dev {
- struct kref refcount;
- struct miscdevice misc;
-};
+extern struct psp_device *psp_master;
+
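+/* Sub-device (SEV/TEE) interrupt callback: (irq, registered data, int status). */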
+typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
struct psp_device {
struct list_head entry;
@@ -52,16 +37,24 @@ struct psp_device {
void __iomem *io_regs;
- int sev_state;
- unsigned int sev_int_rcvd;
- wait_queue_head_t sev_int_queue;
- struct sev_misc_dev *sev_misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
+ psp_irq_handler_t sev_irq_handler;
+ void *sev_irq_data;
+
+ psp_irq_handler_t tee_irq_handler;
+ void *tee_irq_data;
- u8 api_major;
- u8 api_minor;
- u8 build;
+ void *sev_data;
+ void *tee_data;
};
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_sev_irq_handler(struct psp_device *psp);
+
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_tee_irq_handler(struct psp_device *psp);
+
+struct psp_device *psp_get_master_device(void);
+
#endif /* __PSP_DEV_H */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
new file mode 100644
index 000000000000..e467860f797d
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include <asm/smp.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
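+/*
+ * Starts as the probe timeout; switched to psp_cmd_timeout once the
+ * first command completes successfully.
+ */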
+static int psp_timeout;
+
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ if (sev->api_major > maj)
+ return true;
+
+ if (sev->api_major == maj && sev->api_minor >= min)
+ return true;
+
+ return false;
+}
+
+static void sev_irq_handler(int irq, void *data, unsigned int status)
+{
+ struct sev_device *sev = data;
+ int reg;
+
+ /* Check if it is command completion: */
+ if (!(status & SEV_CMD_COMPLETE))
+ return;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (reg & PSP_CMDRESP_RESP) {
+ sev->int_rcvd = 1;
+ wake_up(&sev->int_queue);
+ }
+}
+
+static int sev_wait_cmd_ioc(struct sev_device *sev,
+ unsigned int *reg, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(sev->int_queue,
+ sev->int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+
+ return 0;
+}
+
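+/*
+ * Map a SEV command id to the size of its parameter buffer so that the
+ * payload can be hex-dumped for debugging; commands that take no
+ * parameter buffer map to 0.
+ */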
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg;
+ int ret = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ sev = psp->sev_data;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
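+ /*
+ * Mailbox handshake: program the command buffer address into the
+ * lo/hi address registers, then write the command id (shifted into
+ * the upper half) with the interrupt-on-completion bit set into the
+ * command/response register to start execution.
+ */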
+ iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+ sev->int_rcvd = 0;
+
+ reg = cmd;
+ reg <<= SEV_CMDRESP_CMD_SHIFT;
+ reg |= SEV_CMDRESP_IOC;
+ iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
+
+ /* wait for command completion */
+ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ sev->state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ dev_dbg(sev->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ sev->state = SEV_STATE_UNINIT;
+ dev_dbg(sev->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &sev->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = sev->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * The SEV spec requires that FACTORY_RESET must be issued in
+ * UNINIT state. Before we go further, let's check if any guest is
+ * active.
+ *
+ * If the FW is in WORKING state then deny the request; otherwise,
+ * issue a SHUTDOWN command (INIT -> UNINIT) before issuing
+ * FACTORY_RESET.
+ */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *data = &sev->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sev->state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (sev->state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we query the CSR length, FW responded with expected data. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_get_api_version(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *status;
+ int error = 0, ret;
+
+ status = &sev->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ sev->api_major = status->api_major;
+ sev->api_minor = status->api_minor;
+ sev->build = status->build;
+ sev->state = status->state;
+
+ return 0;
+}
+
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /*
+ * Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall back to using the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
+ return -1;
+ }
+
+ /*
+ * SEV FW expects the physical address given to it to be 32-byte
+ * aligned. The allocated memory has the data structure placed at the
+ * beginning, followed by the firmware image being passed to the SEV
+ * FW. Allocate enough memory for the data structure + alignment
+ * padding + the SEV FW image.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy PEK certificate blobs from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy PEK certificate blobs from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (sev->state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /*
+ * SEV FW expects the buffer it fills with the ID to be 8-byte
+ * aligned. The allocated memory should be enough to hold the data
+ * structure + alignment padding + the buffer where the SEV FW
+ * writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we query the length, FW responded with expected data. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
+ ret = sev_ioctl_do_get_id(&input);
+ break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct sev_device *sev)
+{
+ struct device *dev = sev->dev;
+ int ret;
+
+ /*
+ * SEV feature support can be detected on multiple devices, but the SEV
+ * FW commands must be issued on the master. During probe, we do not
+ * yet know which device is the master, so we create /dev/sev on the
+ * first device probe. sev_do_cmd() then finds the right master device
+ * to which to issue the firmware command.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&sev->int_queue);
+ sev->misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+int sev_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sev_device *sev;
+ int ret = -ENOMEM;
+
+ sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ goto e_err;
+
+ psp->sev_data = sev;
+
+ sev->dev = dev;
+ sev->psp = psp;
+
+ sev->io_regs = psp->io_regs;
+
+ sev->vdata = (struct sev_vdata *)psp->vdata->sev;
+ if (!sev->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "sev: missing driver data\n");
+ goto e_err;
+ }
+
+ psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+
+ ret = sev_misc_init(sev);
+ if (ret)
+ goto e_irq;
+
+ dev_notice(dev, "sev enabled\n");
+
+ return 0;
+
+e_irq:
+ psp_clear_sev_irq_handler(psp);
+e_err:
+ psp->sev_data = NULL;
+
+ dev_notice(dev, "sev initialization failed\n");
+
+ return ret;
+}
+
+void sev_dev_destroy(struct psp_device *psp)
+{
+ struct sev_device *sev = psp->sev_data;
+
+ if (!sev)
+ return;
+
+ if (sev->misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ psp_clear_sev_irq_handler(psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void sev_pci_init(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int error, rc;
+
+ if (!sev)
+ return;
+
+ psp_timeout = psp_probe_timeout;
+
+ if (sev_get_api_version())
+ goto err;
+
+ /*
+ * If the platform is not in the UNINIT state then the firmware
+ * upgrade and/or platform INIT command will fail. These commands
+ * require the UNINIT state.
+ *
+ * In a normal boot we should never run into a case where the
+ * firmware is not in the UNINIT state on boot. But in the case of a
+ * kexec boot, a reboot may not go through a typical shutdown
+ * sequence and may leave the firmware in the INIT or WORKING state.
+ */
+ if (sev->state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ sev->state = SEV_STATE_UNINIT;
+ }
+
+ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(sev->dev) == 0)
+ sev_get_api_version();
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
+ if (rc) {
+ dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ return;
+ }
+
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return;
+
+err:
+ psp_master->sev_data = NULL;
+}
+
+void sev_pci_exit(void)
+{
+ if (!psp_master->sev_data)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
new file mode 100644
index 000000000000..dd5c4fe82914
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __SEV_DEV_H__
+#define __SEV_DEV_H__
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+#include <linux/capability.h>
+
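+/*
+ * Command/response register layout: on a write, the command id occupies
+ * bits [31:16] and SEV_CMDRESP_IOC requests an interrupt on completion;
+ * SEV_CMD_COMPLETE is tested against the interrupt status passed to the
+ * SEV irq handler.
+ */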
+#define SEV_CMD_COMPLETE BIT(1)
+#define SEV_CMDRESP_CMD_SHIFT 16
+#define SEV_CMDRESP_IOC BIT(0)
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sev_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ void __iomem *io_regs;
+
+ struct sev_vdata *vdata;
+
+ int state;
+ unsigned int int_rcvd;
+ wait_queue_head_t int_queue;
+ struct sev_misc_dev *misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
+};
+
+int sev_dev_init(struct psp_device *psp);
+void sev_dev_destroy(struct psp_device *psp);
+
+void sev_pci_init(void);
+void sev_pci_exit(void);
+
+#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 53c12562d31e..423594608ad1 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -39,10 +39,23 @@ struct ccp_vdata {
const unsigned int rsamax;
};
-struct psp_vdata {
+struct sev_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
+};
+
+struct tee_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int ring_wptr_reg;
+ const unsigned int ring_rptr_reg;
+};
+
+struct psp_vdata {
+ const struct sev_vdata *sev;
+ const struct tee_vdata *tee;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b29d2e663e10..56c1f61c0f84 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata pspv1 = {
+static const struct sev_vdata sevv1 = {
.cmdresp_reg = 0x10580,
.cmdbuff_addr_lo_reg = 0x105e0,
.cmdbuff_addr_hi_reg = 0x105e4,
+};
+
+static const struct sev_vdata sevv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+};
+
+static const struct tee_vdata teev1 = {
+ .cmdresp_reg = 0x10544,
+ .cmdbuff_addr_lo_reg = 0x10548,
+ .cmdbuff_addr_hi_reg = 0x1054c,
+ .ring_wptr_reg = 0x10550,
+ .ring_rptr_reg = 0x10554,
+};
+
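+/*
+ * PSP variants: pspv1 and pspv2 expose the SEV mailbox (at different
+ * register offsets), while pspv3 exposes the TEE ring interface instead.
+ */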
+static const struct psp_vdata pspv1 = {
+ .sev = &sevv1,
.feature_reg = 0x105fc,
.inten_reg = 0x10610,
.intsts_reg = 0x10614,
};
static const struct psp_vdata pspv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .sev = &sevv2,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
+static const struct psp_vdata pspv3 = {
+ .tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
@@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 4 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
new file mode 100644
index 000000000000..5e697a90ea7f
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-tee.h>
+
+#include "psp-dev.h"
+#include "tee-dev.h"
+
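+/* Set when a TEE command times out; all further TEE requests are refused. */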
+static bool psp_dead;
+
+static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+ void *start_addr;
+
+ if (!ring_size)
+ return -EINVAL;
+
+ /*
+ * We need the actual physical address instead of a DMA address, since
+ * the Trusted OS running on the AMD Secure Processor will map this
+ * region.
+ */
+ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
+ if (!start_addr)
+ return -ENOMEM;
+
+ rb_mgr->ring_start = start_addr;
+ rb_mgr->ring_size = ring_size;
+ rb_mgr->ring_pa = __psp_pa(start_addr);
+ mutex_init(&rb_mgr->mutex);
+
+ return 0;
+}
+
+static void tee_free_ring(struct psp_tee_device *tee)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+
+ if (!rb_mgr->ring_start)
+ return;
+
+ free_pages((unsigned long)rb_mgr->ring_start,
+ get_order(rb_mgr->ring_size));
+
+ rb_mgr->ring_start = NULL;
+ rb_mgr->ring_size = 0;
+ rb_mgr->ring_pa = 0;
+ mutex_destroy(&rb_mgr->mutex);
+}
+
+static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
+ unsigned int *reg)
+{
+ /* ~10ms sleep per loop => nloop = timeout * 100 */
+ int nloop = timeout * 100;
+
+ while (--nloop) {
+ *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ usleep_range(10000, 10100);
+ }
+
+ dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+static
+struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
+{
+ struct tee_init_ring_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
+ cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
+ cmd->size = tee->rb_mgr.ring_size;
+
+ dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
+ cmd->hi_addr, cmd->low_addr, cmd->size);
+
+ return cmd;
+}
+
+static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+static int tee_init_ring(struct psp_tee_device *tee)
+{
+ int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
+ struct tee_init_ring_cmd *cmd;
+ phys_addr_t cmd_buffer;
+ unsigned int reg;
+ int ret;
+
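+ /* The Trusted OS relies on fixed 1 KB ring entries; catch layout drift. */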
+ BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
+
+ ret = tee_alloc_ring(tee, ring_size);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
+ return ret;
+ }
+
+ tee->rb_mgr.wptr = 0;
+
+ cmd = tee_alloc_cmd_buffer(tee);
+ if (!cmd) {
+ tee_free_ring(tee);
+ return -ENOMEM;
+ }
+
+ cmd_buffer = __psp_pa((void *)cmd);
+
+ /*
+ * Send the command buffer details to the Trusted OS by writing to
+ * the CPU-PSP message registers.
+ */
+ iowrite32(lower_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(upper_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
+ iowrite32(TEE_RING_INIT_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring init command timed out\n");
+ tee_free_ring(tee);
+ goto free_buf;
+ }
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ tee_free_ring(tee);
+ ret = -EIO;
+ }
+
+free_buf:
+ tee_free_cmd_buffer(cmd);
+
+ return ret;
+}
+
+static void tee_destroy_ring(struct psp_tee_device *tee)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!tee->rb_mgr.ring_start)
+ return;
+
+ if (psp_dead)
+ goto free_ring;
+
+ iowrite32(TEE_RING_DESTROY_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ } else if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ }
+
+free_ring:
+ tee_free_ring(tee);
+}
+
+int tee_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_tee_device *tee;
+ int ret;
+
+ ret = -ENOMEM;
+ tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
+ if (!tee)
+ goto e_err;
+
+ psp->tee_data = tee;
+
+ tee->dev = dev;
+ tee->psp = psp;
+
+ tee->io_regs = psp->io_regs;
+
+ tee->vdata = (struct tee_vdata *)psp->vdata->tee;
+ if (!tee->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "tee: missing driver data\n");
+ goto e_err;
+ }
+
+ ret = tee_init_ring(tee);
+ if (ret) {
+ dev_err(dev, "tee: failed to init ring buffer\n");
+ goto e_err;
+ }
+
+ dev_notice(dev, "tee enabled\n");
+
+ return 0;
+
+e_err:
+ psp->tee_data = NULL;
+
+ dev_notice(dev, "tee initialization failed\n");
+
+ return ret;
+}
+
+void tee_dev_destroy(struct psp_device *psp)
+{
+ struct psp_tee_device *tee = psp->tee_data;
+
+ if (!tee)
+ return;
+
+ tee_destroy_ring(tee);
+}
+
+static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ void *buf, size_t len, struct tee_ring_cmd **resp)
+{
+ struct tee_ring_cmd *cmd;
+ u32 rptr, wptr;
+ int nloop = 1000, ret = 0;
+
+ *resp = NULL;
+
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ wptr = tee->rb_mgr.wptr;
+
+ /*
+ * Check if the ring buffer is full: the next write would land on
+ * the entry the Trusted OS has not yet consumed.
+ */
+ do {
+ rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+
+ if (wptr + sizeof(struct tee_ring_cmd) != rptr)
+ break;
+
+ dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+
+ /* Wait if ring buffer is full */
+ mutex_unlock(&tee->rb_mgr.mutex);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ } while (--nloop);
+
+ if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Pointer to empty data entry in ring buffer */
+ cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+
+ /* Write command data into ring buffer */
+ cmd->cmd_id = cmd_id;
+ cmd->cmd_state = TEE_CMD_STATE_INIT;
+ memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ memcpy(&cmd->buf[0], buf, len);
+
+ /* Update local copy of write pointer */
+ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+ tee->rb_mgr.wptr = 0;
+
+ /* Trigger interrupt to Trusted OS */
+ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);
+
+ /*
+ * The response is provided by the Trusted OS in the same location
+ * as the submitted data entry within the ring buffer.
+ */
+ *resp = cmd;
+
+unlock:
+ mutex_unlock(&tee->rb_mgr.mutex);
+
+ return ret;
+}
+
+static int tee_wait_cmd_completion(struct psp_tee_device *tee,
+ struct tee_ring_cmd *resp,
+ unsigned int timeout)
+{
+ /* ~5ms sleep per loop => nloop = timeout * 200 */
+ int nloop = timeout * 200;
+
+ while (--nloop) {
+ if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
+ resp->cmd_id);
+
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
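+/*
+ * Write a command into the next free ring entry, wait for the Trusted OS
+ * to mark it completed, then copy the response back into the caller's
+ * buffer.
+ */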
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_tee_device *tee;
+ struct tee_ring_cmd *resp;
+ int ret;
+
+ if (!buf || !status || !len || len > sizeof(resp->buf))
+ return -EINVAL;
+
+ *status = 0;
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ tee = psp->tee_data;
+
+ ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
+ if (ret)
+ return ret;
+
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &resp->buf[0], len);
+ *status = resp->status;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_tee_process_cmd);
+
+int psp_check_tee_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_tee_status);
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
new file mode 100644
index 000000000000..f09960112115
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ */
+
+/*
+ * This file describes the TEE communication interface between the host
+ * and the AMD Secure Processor.
+ */
+
+#ifndef __TEE_DEV_H__
+#define __TEE_DEV_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+#define TEE_DEFAULT_TIMEOUT 10
+#define MAX_BUFFER_SIZE 992
+
+/**
+ * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+ * @TEE_RING_INIT_CMD: Initialize ring buffer
+ * @TEE_RING_DESTROY_CMD: Destroy ring buffer
+ * @TEE_RING_MAX_CMD: Maximum command id
+ */
+enum tee_ring_cmd_id {
+ TEE_RING_INIT_CMD = 0x00010000,
+ TEE_RING_DESTROY_CMD = 0x00020000,
+ TEE_RING_MAX_CMD = 0x000F0000,
+};
+
+/**
+ * struct tee_init_ring_cmd - Command to init TEE ring buffer
+ * @low_addr: bits [31:0] of the physical address of ring buffer
+ * @hi_addr: bits [63:32] of the physical address of ring buffer
+ * @size: size of ring buffer in bytes
+ */
+struct tee_init_ring_cmd {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+#define MAX_RING_BUFFER_ENTRIES 32
+
+/**
+ * struct ring_buf_manager - Helper structure to manage ring buffer.
+ * @ring_start: starting address of ring buffer
+ * @ring_size: size of ring buffer in bytes
+ * @ring_pa: physical address of ring buffer
+ * @wptr: index to the last written entry in ring buffer
+ */
+struct ring_buf_manager {
+ struct mutex mutex; /* synchronizes access to ring buffer */
+ void *ring_start;
+ u32 ring_size;
+ phys_addr_t ring_pa;
+ u32 wptr;
+};
+
+struct psp_tee_device {
+ struct device *dev;
+ struct psp_device *psp;
+ void __iomem *io_regs;
+ struct tee_vdata *vdata;
+ struct ring_buf_manager rb_mgr;
+};
+
+/**
+ * enum tee_cmd_state - TEE command states for the ring buffer interface
+ * @TEE_CMD_STATE_INIT: initial state of command when sent from host
+ * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment
+ * @TEE_CMD_STATE_COMPLETED: command processing completed
+ */
+enum tee_cmd_state {
+ TEE_CMD_STATE_INIT,
+ TEE_CMD_STATE_PROCESS,
+ TEE_CMD_STATE_COMPLETED,
+};
+
+/**
+ * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+ * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
+ * interface
+ * @cmd_state: refers to &enum tee_cmd_state
+ * @status: status of TEE command execution
+ * @res0: reserved region
+ * @pdata: private data (currently unused)
+ * @res1: reserved region
+ * @buf: TEE command specific buffer
+ */
+struct tee_ring_cmd {
+ u32 cmd_id;
+ u32 cmd_state;
+ u32 status;
+ u32 res0[1];
+ u64 pdata;
+ u32 res1[2];
+ u8 buf[MAX_BUFFER_SIZE];
+
+ /* Total size: 1024 bytes */
+} __packed;
+
+int tee_dev_init(struct psp_device *psp);
+void tee_dev_destroy(struct psp_device *psp);
+
+#endif /* __TEE_DEV_H__ */
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 64d318dc0d47..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
- areq->cryptlen, 0);
+ areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/
@@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
return -EINVAL;
break;
default:
- dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
if (ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_192 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
- goto badkey;
+ return rc;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
@@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
- rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
- goto badkey;
+ return -EINVAL;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
@@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = validate_keys_sizes(ctx);
if (rc)
- goto badkey;
+ return rc;
/* STAT_PHASE_1: Copy key to ctx */
@@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
- goto badkey;
+ return rc;
}
/* STAT_PHASE_2: Create sequence */
@@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
break; /* No auth. key setup */
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- rc = -ENOTSUPP;
- goto badkey;
+ return -ENOTSUPP;
}
/* STAT_PHASE_3: Submit sequence to HW */
@@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto setkey_error;
+ return rc;
}
}
/* Update STAT_PHASE_3 */
return rc;
-
-badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
- return rc;
}
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
@@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, areq_ctx->assoclen);
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3112b58d0bb1..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
dev_err(dev, "Unsupported protected key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
keylen = hki.keylen;
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
- dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
@@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
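+/*
+ * Split out of cc_setup_state_desc(): loads the XEX key (and IV) for
+ * XTS/ESSIV/BITLOCKER modes; it is queued after cc_setup_key_desc() so
+ * the cipher key descriptors come first.
+ */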
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
/* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
- dev_err(dev, "Unsupported data size %d.\n", nbytes);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
goto exit_process;
}
@@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup IV and XEX key used */
+ /* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
- /* if driver suspended return, probebly shared interrupt */
+ /* if the driver is suspended, return; probably a shared interrupt */
if (cc_pm_is_dev_suspended(dev))
return IRQ_NONE;
@@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
struct clk *clk;
+ int irq;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
&req_mem_cc_regs->start, new_drvdata->cc_base);
/* Then IRQ */
- new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0)
- return new_drvdata->irq;
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
init_completion(&new_drvdata->hw_queue_avail);
@@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
/* register the driver isr function */
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_fips_init(new_drvdata);
if (rc) {
- dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
goto post_debugfs_err;
}
rc = cc_sram_mgr_init(new_drvdata);
@@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_buffer_mgr_init(new_drvdata);
if (rc) {
- dev_err(dev, "buffer_mgr_init failed\n");
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index ab31d4a68c80..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"
@@ -133,13 +132,11 @@ struct cc_crypto_req {
/**
* struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @irq: bitmap indicating source of last interrupt
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
- u32 irq_mask;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -161,6 +158,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
+ bool pm_on;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4c8bce33abcf..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
cc_tee_handle_fips_error(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
val = (CC_REG(HOST_IMR) & ~irq);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
-#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
}
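
Editor's note: these deletions track the crypto-core cleanup that retired the CRYPTO_TFM_RES_* result flags -- a bad key is now reported through the setkey return value alone -- and drop the CC_DEV_SHA_MAX guard (its #define was removed from cc_driver.h above), so the SHA-384/512 digest-length entries are always compiled in. Shape of a setkey error path after the change (sketch, not driver code):

    #include <crypto/hash.h>

    static int setkey_sketch(struct crypto_ahash *ahash, const u8 *key,
                             unsigned int keylen)
    {
            if (!keylen)
                    return -EINVAL; /* no crypto_ahash_set_flags(...BAD_KEY_LEN) */
            /* ... map and program the key ... */
            return 0;
    }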
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index dbc508fb719b..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- rc = cc_suspend_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
- return rc;
- }
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
- /* wait for Crytpcell reset completion */
+ /* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
- rc = cc_resume_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
return 0;
@@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (cc_req_queue_suspended(drvdata))
+ if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
- else
- pm_runtime_get_noresume(dev);
- return rc;
+ return (rc == 1 ? 0 : rc);
}
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
{
- int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (!cc_req_queue_suspended(drvdata)) {
+ if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something wrong happens*/
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
+ pm_runtime_put_autosuspend(dev);
}
- return rc;
}
bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- /* must be before the enabling to avoid resdundent suspending */
+ /* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
+ /* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = false;
}
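
Editor's note: runtime-PM gating moves from the request queue into the new drvdata->pm_on flag, set in cc_pm_go() and cleared in cc_pm_fini(). cc_pm_get() also folds pm_runtime_get_sync()'s "already active" result (1) to 0, and cc_pm_put_suspend() becomes void because an autosuspend put has no error the caller can act on. Typical caller pairing under the new scheme (sketch):

    static int do_hw_work_sketch(struct device *dev)
    {
            int rc = cc_pm_get(dev);        /* 0 on success, <0 if resume failed */

            if (rc)
                    return rc;
            /* ... issue descriptors to the engine ... */
            cc_pm_put_suspend(dev);         /* mark busy + schedule autosuspend */
            return 0;
    }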
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-static inline int cc_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
- return 0;
-}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
}
-static inline int cc_pm_put_suspend(struct device *dev)
-{
- return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
static inline bool cc_pm_is_dev_suspended(struct device *dev)
{
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
- bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
- * be chaned during the poll because the spinlock_bh
+ * be changed during the poll because the spinlock_bh
* is held by the thread
*/
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
* \param len The crypto sequence length
* \param add_comp If "true": add an artificial dout DMA to mark completion
*
- * \return int Returns -EINPROGRESS or error code
*/
-static int cc_do_send_request(struct cc_drvdata *drvdata,
- struct cc_crypto_req *cc_req,
- struct cc_hw_desc *desc, unsigned int len,
- bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/* Update the free slots in HW queue */
req_mgr_h->q_free_slots -= total_seq_len;
}
-
- /* Operation still in process */
- return -EINPROGRESS;
}
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
return;
}
- rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false);
-
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
spin_unlock(&mgr->hw_lock);
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- creq->user_cb(dev, req, rc);
- }
-
/* Remove ourselves from the backlog list */
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
+ kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
return -EBUSY;
}
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
-
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- return rc;
- }
-
wait_for_completion(&cc_req->seq_compl);
return 0;
}
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}
-
-/*
- * resume the queue configuration - no need to take the lock as this happens
- * inside the spin lock protection
- */
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int cc_suspend_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
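
Editor's note: once cc_queues_status() has reserved the slots, cc_do_send_request() cannot fail, so it returns void and the callers set -EINPROGRESS themselves; the backlog path also gains the kfree(bli) that was previously leaked, and the queue suspend/resume bookkeeping deleted above is superseded by pm_on. Drain shape with the leak fixed (sketch; the real code pops one item at a time under bl_lock, with the HW lock held around the send):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct bl_item_sketch {
            struct list_head list;
            /* + the queued request payload */
    };

    static void drain_backlog_sketch(struct list_head *backlog)
    {
            struct bl_item_sketch *bli, *tmp;

            list_for_each_entry_safe(bli, tmp, backlog, list) {
                    /* send the queued request here */
                    list_del(&bli->list);
                    kfree(bli);     /* the free this hunk adds */
            }
    }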
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index f46cf766fe4d..ff7746aaaf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata);
-
-int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
-#endif
-
#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 91e424378217..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
will be called chcr.
config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- ---help---
- Enable support for IPSec Tx Inline.
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for IPSec Tx Inline.
config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- ---help---
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ select CRYPTO_DEV_CHELSIO
+ ---help---
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
- To compile this driver as a module, choose M here: the module
- will be called chtls.
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 1b4a5664e604..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
+ return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id =
+ (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
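
Editor's note: `!tx_channel_id` only ever alternates between 0 and 1, which load-balances correctly on 2-channel adapters only; the modulo walks every available channel. Equivalent selector (sketch):

    /* Round-robin over all channels instead of toggling {0,1}: */
    static unsigned int next_tx_chan(unsigned int cur, unsigned int nchan)
    {
            return (cur + 1) % nchan;       /* assumes nchan >= 1 */
    }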
@@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_SW;
break;
default:
-
- crypto_tfm_set_flags((struct crypto_tfm *) tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
int error;
if (keylen < 3) {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
keylen -= 3;
@@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
@@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
@@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
if (get_alg_config(&param, max_authsize)) {
pr_err("chcr : Unsupported digest size\n");
@@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
+
subtype = get_aead_subtype(authenc);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
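
Editor's note: the same crypto-core cleanup as in ccree above -- with the CRYPTO_TFM_RES_* mask gone there is nothing to copy back from the software fallback, so the fallback setkey collapses to a tail call and every set_flags(...BAD_KEY_LEN) before -EINVAL disappears. The surviving fallback pattern (sketch, assuming <crypto/skcipher.h>):

    #include <crypto/skcipher.h>

    static int fallback_setkey_sketch(struct crypto_skcipher *tfm,
                                      struct crypto_sync_skcipher *fb,
                                      const u8 *key, unsigned int keylen)
    {
            crypto_sync_skcipher_clear_flags(fb, CRYPTO_TFM_REQ_MASK);
            crypto_sync_skcipher_set_flags(fb, crypto_skcipher_get_flags(tfm) &
                                               CRYPTO_TFM_REQ_MASK);
            return crypto_sync_skcipher_setkey(fb, key, keylen);
    }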
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- struct adapter *adap;
-
mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
list_move(&u_ctx->entry, &drv_data.inact_dev);
if (list_empty(&drv_data.act_dev))
drv_data.last_dev = NULL;
- adap = padap(&u_ctx->dev);
- memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
atomic_dec(&drv_data.dev_count);
mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
stop_crypto();
-
cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
mutex_lock(&drv_data.drv_mutex);
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index d2bc655ab931..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -179,7 +179,10 @@ struct chtls_hws {
u32 copied_seq;
u64 tx_seq_no;
struct tls_scmd scmd;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
};
struct chtls_sock {
@@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
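
Editor's note: making crypto_info a union lets the connection state hold either GCM-128 or GCM-256 parameters in the space of the larger member, selected later by cipher_type. A build-time size relation one could assert (illustrative, not in the patch):

    #include <linux/build_bug.h>
    #include <linux/tls.h>

    static inline void crypto_info_union_sketch(void)
    {
            BUILD_BUG_ON(sizeof(struct tls12_crypto_info_aes_gcm_256) <
                         sizeof(struct tls12_crypto_info_aes_gcm_128));
    }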
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..9b2745ad9e38 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1273,7 +1286,7 @@ static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
ctx = (struct listen_ctx *)data;
lsk = ctx->lsk;
- if (unlikely(tid >= cdev->tids->ntids)) {
+ if (unlikely(tid_out_of_range(cdev->tids, tid))) {
pr_info("passive open TID %u too large\n", tid);
return 1;
}
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
#endif
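
Editor's note: dequeue_wr() moves into the header as a static inline so the new release path in chtls_cm.c can drain pending work requests; chtls_reset_wr_list() then clears the now-dangling head/tail pointers. This mirrors the chtls_purge_wr_queue() added above:

    static inline void purge_wr_sketch(struct sock *sk, struct chtls_sock *csk)
    {
            struct sk_buff *skb;

            while ((skb = dequeue_wr(sk)) != NULL)
                    kfree_skb(skb);
            chtls_reset_wr_list(csk);
    }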
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 2a34035d3cfb..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
- u32 keylen, u32 optname)
+ u32 keylen, u32 optname,
+ int cipher_type)
{
- unsigned char key[AES_KEYSIZE_128];
- struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
- int ck_size, key_ctx_size;
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
int ret;
- gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
- &csk->tlshws.crypto_info;
-
key_ctx_size = sizeof(struct _key_ctx) +
roundup(keylen, 16) + AEAD_H_SIZE;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else {
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * prepare the key context based on the GCM cipher type
+ */
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
}
- memcpy(key, gcm_ctx->key, keylen);
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
key_ctx = ((key_ctx_size >> 4) << 3);
kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx);
chtls_rxkey_ivauth(kctx);
} else {
kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx_size >> 4);
}
- memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(kctx->key, gcm_ctx->key, keylen);
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
/* erase key info from driver */
- memset(gcm_ctx->key, 0, keylen);
+ memset(key_p, 0, keylen);
return 0;
}
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
SCMD_TLS_FRAG_ENABLE_V(1);
}
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
{
struct tls_key_req *kwr;
struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
+ lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
- ret = chtls_key_info(csk, kctx, keylen, optname);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
if (ret)
goto out_notcb;
@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
csk->tlshws.txkey = keyid;
}
+ release_sock(sk);
return ret;
out_notcb:
+ release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);
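
Editor's note: chtls_key_info() now keys everything off cipher_type -- key pointer, salt, salt size, and MAC-key size -- and lock_sock()/release_sock() bracket the key programming so a concurrent setsockopt cannot race the TCB update. The per-cipher parameter selection reduces to the following (sketch; constant names as used in the hunk):

    struct gcm_params_sketch {
            int salt_size;
            int ck_size;
            int mac_size;
    };

    static int pick_gcm_params_sketch(int cipher_type,
                                      struct gcm_params_sketch *p)
    {
            switch (cipher_type) {
            case TLS_CIPHER_AES_GCM_128:
                    p->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
                    p->ck_size   = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
                    p->mac_size  = CHCR_KEYCTX_MAC_KEY_SIZE_128;
                    return 0;
            case TLS_CIPHER_AES_GCM_256:
                    p->salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
                    p->ck_size   = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
                    p->mac_size  = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                    return 0;
            default:
                    return -EINVAL;
            }
    }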
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
- int err;
if (sk->sk_protocol != IPPROTO_TCP)
return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
- err = raw_notifier_call_chain(&listen_notify_list,
+ raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
- return err;
+ return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
struct tls_crypto_info *crypto_info, tmp_crypto_info;
struct chtls_sock *csk;
int keylen;
+ int cipher_type;
int rc = 0;
csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * copy keys from user based on GCM cipher type.
+ */
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- rc = chtls_setkey(csk, keylen, optname);
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
break;
}
default:
rc = -EINVAL;
goto out;
}
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
return rc;
}
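
Editor's note: the setsockopt path copies the cipher-specific tail after the common tls_crypto_info header it already fetched, then hands cipher_type through to chtls_setkey(). From userspace the GCM-256 case is driven like plain kernel TLS (sketch; assumes the socket already has the TLS ULP attached via setsockopt(TCP_ULP), and SOL_TLS defined as below if libc lacks it):

    #include <linux/tls.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    static int tls_tx_gcm256_sketch(int fd, const unsigned char *key,
                                    const unsigned char *iv,
                                    const unsigned char *salt,
                                    const unsigned char *rec_seq)
    {
            struct tls12_crypto_info_aes_gcm_256 ci;

            memset(&ci, 0, sizeof(ci));
            ci.info.version = TLS_1_2_VERSION;
            ci.info.cipher_type = TLS_CIPHER_AES_GCM_256;
            memcpy(ci.key, key, TLS_CIPHER_AES_GCM_256_KEY_SIZE);
            memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
            memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
            memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
            return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
    }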
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}
static void
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 4e7323884ae3..354836468c5d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
addr = pci_resource_start(pdev, i);
size = pci_resource_len(pdev, i);
- dev->bar[i] = ioremap_nocache(addr, size);
+ dev->bar[i] = ioremap(addr, size);
if (!dev->bar[i]) {
err = -ENOMEM;
goto err_out_unmap_bars;
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index c0e7a85fe129..8851161f722f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC
config CRYPTO_DEV_HISI_SEC2
tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
- CBC, and XTS cipher mode.
+ CBC, and XTS cipher mode, and AEAD algorithms.
To compile this as a module, choose M here: the module
will be called hisi_sec2.
@@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select SG_SPLIT
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 98f037e6ea3e..5d400d69e8e4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
- if (id < 0)
+ if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
@@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void)
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
- if (dma_mapping_error(dev, *tmp)) {
+ if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
- if (shift < 0)
+ if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
- if (!ptr)
+ if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
@@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp;
+ dma_addr_t tmp = 0;
int ret;
/* when the data is dh's source, we should format it */
@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
- if (ret)
+ if (unlikely(ret))
return ret;
if (is_src)
@@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst, struct scatterlist *src)
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
{
struct device *dev = HPRE_DEV(ctx);
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (src) {
@@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (req->dst) {
@@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
+ void **kreq)
{
struct hpre_asym_request *req;
int err, id, done;
@@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
- if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
@@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_sqe *sqe = resp;
- ctx->req_list[sqe->tag]->cb(ctx, resp);
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
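
Editor's note: the SQE fields are little-endian on the wire, so sqe->tag must go through le16_to_cpu() before indexing and dw0 cannot take a bare |= on big-endian hosts (sparse flags both). The idiom the dw0 hunks converge on, expressed as a helper (sketch):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline void le32_set_bits_sketch(__le32 *dw, u32 bits)
    {
            *dw = cpu_to_le32(le32_to_cpu(*dw) | bits);
    }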
@@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
ret = hpre_msg_request_set(ctx, req, false);
- if (ret)
+ if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
- msg->dw0 |= HPRE_ALG_DH_G2;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
- msg->dw0 |= HPRE_ALG_DH;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
+ HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
if (!ctx->dh.xa_p)
return -ENOMEM;
@@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) {
- memset(ctx->dh.g, 0, sz);
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL;
}
if (ctx->dh.xa_p) {
- memset(ctx->dh.xa_p, 0, sz);
+ memzero_explicit(ctx->dh.xa_p, sz);
dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
ctx->dh.dma_xa_p);
ctx->dh.xa_p = NULL;
@@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.pubkey)
+ if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.prikey)
+ if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
- msg->dw0 |= HPRE_ALG_NC_CRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
- ctx->rsa.pubkey = NULL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
return -EINVAL;
- }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
-static int hpre_crt_para_get(char *para, const char *raw,
- unsigned int raw_sz, unsigned int para_size)
+static int hpre_crt_para_get(char *para, size_t para_sz,
+ const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_size)
+ if (!len || len > para_sz)
return -EINVAL;
- memcpy(para + para_size - len, ptr, len);
+ memcpy(para + para_sz - len, ptr, len);
return 0;
}
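
Editor's note: the reordered signature keeps each buffer next to its size -- (dst, dst_sz, src, src_sz) -- which makes the five call sites below much harder to transpose. Functionally it still strips leading zeros and right-aligns the value in the fixed-width CRT slot (sketch):

    #include <linux/errno.h>
    #include <linux/string.h>

    static int copy_right_aligned_sketch(u8 *dst, size_t dst_sz,
                                         const u8 *src, size_t src_sz)
    {
            while (src_sz && !*src) {       /* drop leading zero bytes */
                    src++;
                    src_sz--;
            }
            if (!src_sz || src_sz > dst_sz)
                    return -EINVAL;
            memcpy(dst + dst_sz - src_sz, src, src_sz);
            return 0;
    }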
@@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
- rsa_key->dq_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
+ rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
- rsa_key->dp_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
free_key:
offset = hlf_ksz * HPRE_CRT_PRMS;
- memset(ctx->rsa.crt_prikey, 0, offset);
+ memzero_explicit(ctx->rsa.crt_prikey, offset);
dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
}
if (ctx->rsa.crt_prikey) {
- memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ memzero_explicit(ctx->rsa.crt_prikey,
+ half_key_sz * HPRE_CRT_PRMS);
dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
}
if (ctx->rsa.prikey) {
- memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;
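
Editor's note: a plain memset() immediately before freeing is a dead store the compiler may elide; memzero_explicit() carries a barrier so key material is genuinely wiped. Wipe-before-free pattern (sketch):

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static void free_key_buf_sketch(struct device *dev, size_t len,
                                    void *buf, dma_addr_t dma)
    {
            memzero_explicit(buf, len);     /* guaranteed not optimised away */
            dma_free_coherent(dev, len, buf, dma);
    }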
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- return hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx);
+ if (ret)
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+ return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK 0xfffffffe
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
- { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
- { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
- { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
- { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
- { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
- { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
- { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
- { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
- { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
- { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
- { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+ { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+ { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+ { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+ { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+ { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+ { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+ { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+ { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+ { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
if (dir)
file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
return 0;
}
@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- sprintf(buf, "cluster%d", i);
-
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ if (ret < 0)
+ return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
int ret;
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
qm->debug.debug_root = dir;
ret = hisi_qm_debug_init(qm);
@@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void)
return;
hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
- if (IS_ERR_OR_NULL(hpre_debugfs_root))
- hpre_debugfs_root = NULL;
}
static void hpre_unregister_debugfs(void)
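
Editor's note: the debugfs API no longer wants its return values checked -- creation failures must not abort driver init -- so the tmp/tmp_d error paths go away, and the sprintf() calls into the fixed 20-byte buffer become bounded snprintf(). Modern create-and-forget style (sketch):

    #include <linux/debugfs.h>

    static void create_regs_file_sketch(struct dentry *root, void *priv,
                                        const struct file_operations *fops)
    {
            debugfs_create_file("regs", 0444, root, priv, fops);
            /* intentionally no return-value check */
    }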
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 26754d0570ba..13e2d8d7be94 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -9,10 +9,12 @@
#include "../qm.h"
#include "sec_crypto.h"
-/* Cipher resource per hardware SEC queue */
-struct sec_cipher_res {
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
@@ -21,33 +23,35 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
};
+struct sec_aead_req {
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+ struct aead_request *aead_req;
+};
+
/* SEC request of Crypto */
struct sec_req {
struct sec_sqe sec_sqe;
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
- /* Cipher supported only at present */
struct sec_cipher_req c_req;
+ struct sec_aead_req aead_req;
+
int err_type;
int req_id;
/* Status of the SEC request */
- int fake_busy;
+ bool fake_busy;
};
/**
* struct sec_req_op - Operations for SEC request
- * @get_res: Get resources for TFM on the SEC device
- * @resource_alloc: Allocate resources for queue context on the SEC device
- * @resource_free: Free resources for queue context on the SEC device
* @buf_map: DMA map the SGL buffers of the request
* @buf_unmap: DMA unmap the SGL buffers of the request
* @bd_fill: Fill the SEC queue BD
@@ -56,18 +60,25 @@ struct sec_req {
* @process: Main processing logic of Skcipher
*/
struct sec_req_op {
- int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
- int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
- void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
- void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
+/* SEC auth context */
+struct sec_auth_ctx {
+ dma_addr_t a_key_dma;
+ u8 *a_key;
+ u8 a_key_len;
+ u8 mac_len;
+ u8 a_alg;
+ struct crypto_shash *hash_tfm;
+};
+
/* SEC cipher context which holds the cipher's relatives */
struct sec_cipher_ctx {
u8 *c_key;
@@ -83,9 +94,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req **req_list;
+ struct sec_req *req_list[QM_Q_DEPTH];
struct idr req_idr;
- void *alg_meta_data;
+ struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
struct hisi_acc_sgl_pool *c_in_pool;
@@ -93,6 +104,11 @@ struct sec_qp_ctx {
atomic_t pending_reqs;
};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
/* SEC Crypto TFM context which defines queue and cipher relatives, etc. */
struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
@@ -110,7 +126,10 @@ struct sec_ctx {
/* Current cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
+
+ enum sec_alg_type alg_type;
struct sec_cipher_ctx c_ctx;
+ struct sec_auth_ctx a_ctx;
};
enum sec_endian {
@@ -132,8 +151,8 @@ struct sec_debug_file {
};
struct sec_dfx {
- u64 send_cnt;
- u64 recv_cnt;
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
struct sec_debug {
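
The dfx counter change above replaces plain u64 fields bumped with GCC __sync builtins by atomic64_t, which is portable across architectures (on 32-bit ones the generic spinlocked atomic64 fallback applies) and safe from the IRQ-context completion path. The update pattern, as a minimal sketch:

#include <linux/atomic.h>

static atomic64_t ex_send_cnt = ATOMIC64_INIT(0);	/* hypothetical */

static void ex_on_send(void)
{
	atomic64_inc(&ex_send_cnt);	/* lock-free, safe in IRQ context */
}

static u64 ex_snapshot(void)
{
	return atomic64_read(&ex_send_cnt);
}
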
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 62b04e19067c..a2cfcc9ccd94 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -3,7 +3,11 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
@@ -27,6 +31,10 @@
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
@@ -35,12 +43,19 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-static DEFINE_MUTEX(sec_algs_lock);
-static unsigned int sec_active_devs;
+static atomic_t sec_active_devs;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
ctx->hlf_q_num;
}
-static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
atomic_dec(&ctx->enc_qcyclic);
@@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
- if (req_id < 0) {
+ if (unlikely(req_id < 0)) {
dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
return req_id;
}
@@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
@@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
+static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac = mac_out + SEC_MAX_MAC_LEN;
+ struct scatterlist *sgl = aead_req->src;
+ size_t sz;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
+ aead_req->cryptlen + aead_req->assoclen -
+ authsize);
+ if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
+ dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
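
sec_aead_verify() above recovers the expected tag from the tail of the source scatterlist (the authsize bytes ending at assoclen + cryptlen) into the scratch half of the doubled out_mac buffer, then compares it with the MAC the engine wrote. One hedged observation: memcmp() is not constant-time; the crypto API's constant-time helper would avoid a timing side channel, as in this minimal sketch (assuming both MACs are already in kernel buffers):

#include <crypto/algapi.h>	/* crypto_memneq() */
#include <linux/errno.h>
#include <linux/types.h>

static int example_check_mac(const u8 *computed, const u8 *expected,
			     size_t authsize)
{
	return crypto_memneq(computed, expected, authsize) ? -EBADMSG : 0;
}
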
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
u16 done, flag;
+ int err = 0;
u8 type;
- struct sec_req *req;
type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (type == SEC_BD_TYPE2) {
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
- req->err_type = bd->type2.error_type;
-
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (req->err_type || done != 0x1 || flag != 0x2)
- dev_err(SEC_CTX_DEV(req->ctx),
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- } else {
+ if (unlikely(type != SEC_BD_TYPE2)) {
pr_err("err bd type [%d]\n", type);
return;
}
- __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
+ (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
+ dev_err(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
+ }
+
+ if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
+ err = sec_aead_verify(req, qp_ctx);
- req->ctx->req_op->buf_unmap(req->ctx, req);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
- req->ctx->req_op->callback(req->ctx, req);
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
@@ -135,9 +180,9 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
- __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- if (ret == -EBUSY)
+ if (unlikely(ret == -EBUSY))
return -ENOBUFS;
if (!ret) {
@@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
+
+static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ &res->out_mac_dma, GFP_KERNEL);
+ if (!res->out_mac)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].out_mac_dma = res->out_mac_dma +
+ i * (SEC_MAX_MAC_LEN << 1);
+ res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
+ }
+
+ return 0;
+}
+
+static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->out_mac)
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac, res->out_mac_dma);
+}
+
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
+ int ret;
+
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_mac_resource(dev, res);
+ if (ret)
+ goto get_fail;
+ }
+
+ return 0;
+get_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
+}
+
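
The allocators above share one pattern: a single dma_alloc_coherent() covering all QM_Q_DEPTH slots, after which entry i is just an offset into the base mapping, so per-request IVs and MACs never need allocations of their own. The same pattern with assumed names and sizes:

#include <linux/dma-mapping.h>

#define EX_DEPTH	8	/* assumed queue depth */
#define EX_SLOT		16	/* assumed per-request slot size */

struct ex_slot {
	u8 *va;
	dma_addr_t dma;
};

static int ex_alloc_slots(struct device *dev, struct ex_slot *s)
{
	int i;

	/* One coherent allocation backs all EX_DEPTH slots. */
	s[0].va = dma_alloc_coherent(dev, EX_DEPTH * EX_SLOT,
				     &s[0].dma, GFP_KERNEL);
	if (!s[0].va)
		return -ENOMEM;

	for (i = 1; i < EX_DEPTH; i++) {
		s[i].va = s[0].va + i * EX_SLOT;
		s[i].dma = s[0].dma + i * EX_SLOT;
	}
	return 0;
}
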
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
@@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list)
- goto err_destroy_idr;
-
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_free_req_list;
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
@@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
goto err_free_c_in_pool;
}
- ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_req_list:
- kfree(qp_ctx->req_list);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
hisi_qm_release_qp(qp);
@@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- kfree(qp_ctx->req_list);
hisi_qm_release_qp(qp_ctx->qp);
}
-static int sec_skcipher_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx;
struct sec_dev *sec;
- struct device *dev;
- struct hisi_qm *qm;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
-
sec = sec_find_device(cpu_to_node(smp_processor_id()));
if (!sec) {
- pr_err("find no Hisilicon SEC device!\n");
+ pr_err("Can not find proper Hisilicon SEC device!\n");
return -ENODEV;
}
ctx->sec = sec;
- qm = &sec->qm;
- dev = &qm->pdev->dev;
- ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
return -ENOMEM;
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
- c_ctx = &ctx->c_ctx;
- c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
- if (c_ctx->ivsize > SEC_IV_SIZE) {
- dev_err(dev, "get error iv size!\n");
- ret = -EINVAL;
- goto err_sec_release_qp_ctx;
- }
- c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
- &c_ctx->c_key_dma, GFP_KERNEL);
- if (!c_ctx->c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
return 0;
-
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -290,17 +390,9 @@ err_sec_release_qp_ctx:
return ret;
}
-static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- int i = 0;
-
- if (c_ctx->c_key) {
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
- c_ctx->c_key, c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
+static int sec_cipher_init(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_cipher_uninit(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+}
+
+static int sec_auth_init(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &a_ctx->a_key_dma, GFP_KERNEL);
+ if (!a_ctx->a_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_auth_uninit(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key, a_ctx->a_key_dma);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ctx = crypto_skcipher_ctx(tfm);
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
+
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
const u32 keylen,
const enum sec_cmode c_mode)
@@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_skcipher_get_res(struct sec_ctx *ctx,
- struct sec_req *req)
-{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
- struct sec_cipher_req *c_req = &req->c_req;
- int req_id = req->req_id;
-
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
-
- return 0;
-}
-
-static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_res *res;
- int i;
-
- res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
- &res->c_ivin_dma, GFP_KERNEL);
- if (!res->c_ivin) {
- kfree(res);
- return -ENOMEM;
- }
-
- for (i = 1; i < QM_Q_DEPTH; i++) {
- res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
- res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
- }
- qp_ctx->alg_meta_data = res;
-
- return 0;
-}
-
-static void sec_skcipher_resource_free(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_cipher_res *res = qp_ctx->alg_meta_data;
- struct device *dev = SEC_CTX_DEV(ctx);
-
- if (!res)
- return;
-
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
- kfree(res);
-}
-
-static int sec_skcipher_map(struct device *dev, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static int sec_cipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req,
return 0;
}
+static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+}
+
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
- c_req->sk_req->src, c_req->sk_req->dst);
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
@@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req = c_req->sk_req;
- if (sk_req->dst != sk_req->src)
- hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+ sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+}
+
+static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
+ struct crypto_authenc_keys *keys)
+{
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aead aes key error!\n");
+ return -EINVAL;
+ }
+ memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
+
+ return 0;
+}
+
+static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ struct crypto_authenc_keys *keys)
+{
+ struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ int blocksize, ret;
- hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+ if (!keys->authkeylen) {
+ pr_err("hisi_sec2: aead auth key error!\n");
+ return -EINVAL;
+ }
+
+ blocksize = crypto_shash_blocksize(hash_tfm);
+ if (keys->authkeylen > blocksize) {
+ ret = crypto_shash_digest(shash, keys->authkey,
+ keys->authkeylen, ctx->a_key);
+ if (ret) {
+ pr_err("hisi_sec2: aead auth digest error!\n");
+ return -EINVAL;
+ }
+ ctx->a_key_len = blocksize;
+ } else {
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ ctx->a_key_len = keys->authkeylen;
+ }
+
+ return 0;
+}
+
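
sec_aead_auth_set_key() above applies the standard RFC 2104 HMAC key rule: a key longer than the underlying hash's block size is first replaced by its digest (here via the software shash allocated elsewhere in this patch), while shorter keys are used as-is. The rule in isolation, as a hedged sketch with an assumed digest callback:

#include <string.h>

static size_t hmac_prepare_key(const unsigned char *key, size_t keylen,
			       size_t blocksize, size_t digestsize,
			       void (*digest)(const unsigned char *in,
					      size_t inlen,
					      unsigned char *out),
			       unsigned char *out)
{
	if (keylen > blocksize) {
		digest(key, keylen, out);	/* key := H(key) */
		return digestsize;
	}
	memcpy(out, key, keylen);		/* short keys pass through */
	return keylen;
}
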
+static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ const u32 keylen, const enum sec_hash_alg a_alg,
+ const enum sec_calg c_alg,
+ const enum sec_mac_len mac_len,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ ctx->a_ctx.a_alg = a_alg;
+ ctx->c_ctx.c_alg = c_alg;
+ ctx->a_ctx.mac_len = mac_len;
+ c_ctx->c_mode = c_mode;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ ret = sec_aead_aes_set_key(c_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ goto bad_key;
+ }
+
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
+
+ return -EINVAL;
+}
+
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
+ u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+ SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+ SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+ SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+
+static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+}
+
+static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *cq = &req->c_req;
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ctx->req_op->do_transfer(ctx, req);
ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto unmap_req_buf;
return ret;
@@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
- c_req->c_len = sk_req->cryptlen;
- memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_sqe *sec_sqe = &req->sec_sqe;
- u8 de = 0;
u8 scene, sa_type, da_type;
u8 bd_type, cipher;
+ u8 de = 0;
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return 0;
}
-static void sec_update_iv(struct sec_req *req)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct skcipher_request *sk_req = req->c_req.sk_req;
u32 iv_size = req->ctx->c_ctx.ivsize;
struct scatterlist *sgl;
+ unsigned int cryptlen;
size_t sz;
+ u8 *iv;
if (req->c_req.encrypt)
- sgl = sk_req->dst;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
else
- sgl = sk_req->src;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
- iv_size, sk_req->cryptlen - iv_size);
- if (sz != iv_size)
+ if (alg_type == SEC_SKCIPHER) {
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
+ } else {
+ iv = aead_req->iv;
+ cryptlen = aead_req->cryptlen;
+ }
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
sec_free_req_id(req);
/* IV output at encrypt of CBC mode */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
- sec_update_iv(req);
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ if (req->fake_busy)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- sk_req->base.complete(&sk_req->base, req->err_type);
+ sk_req->base.complete(&sk_req->base, err);
+}
+
+static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+
+ memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+}
+
+static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sec_sqe->type2.mac_key_alg =
+ cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)((ctx->a_key_len) /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+}
+
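
sec_auth_bd_fill_ex() packs three fields into mac_key_alg using the bit layout documented in the sec_crypto.h hunk below (mac_len in bits 0-4, a_key_len in bits 5-10, a_alg from bit 11), with both lengths expressed in 4-byte words (SEC_SQE_LEN_RATE). A worked example, assuming HMAC-SHA256 with a 64-byte auth key:

#include <stdint.h>
#include <stdio.h>

#define SEC_SQE_LEN_RATE	4
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11

int main(void)
{
	/* 32-byte MAC, 64-byte auth key, a_alg = 0x11 (SEC_A_HMAC_SHA256). */
	uint32_t mac_key_alg = (32 / SEC_SQE_LEN_RATE) |
			       ((64 / SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET) |
			       (0x11u << SEC_AEAD_ALG_OFFSET);

	printf("mac_key_alg = 0x%x\n", mac_key_alg);	/* prints 0x8a08 */
	return 0;
}
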
+static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ int ret;
+
+ ret = sec_skcipher_bd_fill(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ return ret;
+ }
+
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+{
+ struct aead_request *a_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_cipher_req *c_req = &req->c_req;
+ size_t authsize = crypto_aead_authsize(tfm);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ size_t sz;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+
+ if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
+ sec_update_iv(req, SEC_AEAD);
+
+ /* Copy output mac */
+ if (!err && c_req->encrypt) {
+ struct scatterlist *sgl = a_req->dst;
+
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+ qp_ctx->res[req->req_id].out_mac,
+ authsize, a_req->cryptlen +
+ a_req->assoclen);
+
+ if (unlikely(sz != authsize)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ err = -EINVAL;
+ }
+ }
+
+ sec_free_req_id(req);
+
+ if (req->fake_busy)
+ a_req->base.complete(&a_req->base, -EINPROGRESS);
+
+ a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
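
A word on fake_busy, which this patch turns into a plain bool: sec_request_init() in the next hunk marks a request fake-busy once its queue is more than half full, so the submitter sees -EBUSY even though the request was accepted; on completion the callbacks above first signal -EINPROGRESS so the crypto API learns that the "rejected" request actually ran. The flag is written once at submission and read once at completion, so the old compare-and-swap was unnecessary. The two halves of the handshake, as fragments (names from the surrounding code):

/* Submission side: queue the request but report backpressure. */
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
	req->fake_busy = true;		/* submitter will see -EBUSY */

/* Completion side: acknowledge the -EBUSY submission, then finish. */
if (req->fake_busy)
	a_req->base.complete(&a_req->base, -EINPROGRESS);

a_req->base.complete(&a_req->base, err);
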
@@ -653,37 +1003,30 @@ static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
- sec_put_queue_id(ctx, req);
+ sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int issue_id, ret;
+ int queue_id;
/* To load balance */
- issue_id = sec_get_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[issue_id];
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id < 0) {
- sec_put_queue_id(ctx, req);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
return req->req_id;
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = 1;
+ req->fake_busy = true;
else
- req->fake_busy = 0;
-
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- atomic_dec(&qp_ctx->pending_reqs);
- sec_request_uninit(ctx, req);
- dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
- }
+ req->fake_busy = false;
- return ret;
+ return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
@@ -691,20 +1034,20 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = sec_request_init(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = sec_request_transfer(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto err_uninit_req;
/* Output IV when decrypting */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- sec_update_iv(req);
+ sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
err_send_req:
/* On failure, restore the IV from the user */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
- ctx->c_ctx.ivsize);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ else
+ memcpy(req->aead_req.aead_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ }
sec_request_untransfer(ctx, req);
err_uninit_req:
@@ -723,10 +1073,7 @@ err_uninit_req:
return ret;
}
-static struct sec_req_op sec_req_ops_tbl = {
- .get_res = sec_skcipher_get_res,
- .resource_alloc = sec_skcipher_resource_alloc,
- .resource_free = sec_skcipher_resource_free,
+static const struct sec_req_op sec_skcipher_req_ops = {
.buf_map = sec_skcipher_sgl_map,
.buf_unmap = sec_skcipher_sgl_unmap,
.do_transfer = sec_skcipher_copy_iv,
@@ -736,39 +1083,139 @@ static struct sec_req_op sec_req_ops_tbl = {
.process = sec_process,
};
+static const struct sec_req_op sec_aead_req_ops = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_copy_iv,
+ .bd_fill = sec_aead_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->req_op = &sec_req_ops_tbl;
+ ctx->req_op = &sec_skcipher_req_ops;
return sec_skcipher_init(tfm);
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
- sec_skcipher_exit(tfm);
+ sec_skcipher_uninit(tfm);
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx,
- struct skcipher_request *sk_req)
+static int sec_aead_init(struct crypto_aead *tfm)
{
- u8 c_alg = ctx->c_ctx.c_alg;
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->alg_type = SEC_AEAD;
+ ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ return -EINVAL;
+ }
+
+ ctx->req_op = &sec_aead_req_ops;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_auth_init(ctx);
+ if (ret)
+ goto err_auth_init;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return ret;
+
+err_cipher_init:
+ sec_auth_uninit(ctx);
+err_auth_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_aead_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_auth_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
+static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ pr_err("hisi_sec2: aead init error!\n");
+ return ret;
+ }
+
+ auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(auth_ctx->hash_tfm)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(auth_ctx->hash_tfm);
+ }
+
+ return 0;
+}
+
+static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+}
+
+static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha1");
+}
+
+static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha256");
+}
+
+static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha512");
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- if (!sk_req->src || !sk_req->dst) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
-
+ sreq->c_req.c_len = sk_req->cryptlen;
if (c_alg == SEC_CALG_3DES) {
- if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
return -EINVAL;
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher aes input length error!\n");
return -EINVAL;
}
@@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
- ret = sec_skcipher_param_check(ctx, sk_req);
- if (ret)
- return ret;
-
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
return ctx->req_op->process(ctx, req);
}
@@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_algs[] = {
+static struct skcipher_alg sec_skciphers[] = {
SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
AES_BLOCK_SIZE, 0)
@@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+
+ if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ return -EINVAL;
+ }
+
+ /* Support AES only */
+ if (unlikely(c_alg != SEC_CALG_AES)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ return -EINVAL;
+
+ }
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ req->aead_req.aead_req = a_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ ret = sec_aead_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_aead_encrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, true);
+}
+
+static int sec_aead_decrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, false);
+}
+
+#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+ ctx_exit, blk_size, iv_size, max_authsize)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_aead_decrypt,\
+ .encrypt = sec_aead_encrypt,\
+ .ivsize = iv_size,\
+ .maxauthsize = max_authsize,\
+}
+
+#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
+ SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
+ sec_aead_ctx_exit, blksize, ivsize, authsize)
+
+static struct aead_alg sec_aeads[] = {
+ SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
+ sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
+ sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
+ sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+};
+
int sec_register_to_crypto(void)
{
int ret = 0;
/* To avoid repeated registration */
- mutex_lock(&sec_algs_lock);
- if (++sec_active_devs == 1)
- ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ goto reg_aead_fail;
+ }
+
+ return ret;
+
+reg_aead_fail:
+ crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- mutex_lock(&sec_algs_lock);
- if (--sec_active_devs == 0)
- crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ }
}
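
The registration rework above turns the mutex-plus-counter scheme into an atomic gate: only the 0 -> 1 transition registers the algorithm arrays, only 1 -> 0 unregisters them, and a failure registering the AEADs rolls back the already-registered skciphers. The shape of the pattern, with hypothetical register_part_a()/register_part_b() helpers:

#include <linux/atomic.h>

int register_part_a(void);		/* hypothetical helpers */
int register_part_b(void);
void unregister_part_a(void);

static atomic_t ex_users = ATOMIC_INIT(0);

static int ex_register(void)
{
	int ret = 0;

	if (atomic_inc_return(&ex_users) == 1) {
		ret = register_part_a();
		if (ret)
			return ret;
		ret = register_part_b();
		if (ret)
			unregister_part_a();	/* roll back on failure */
	}
	return ret;
}
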
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 097dce828340..b2786e17d8fe 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -14,6 +14,18 @@ enum sec_calg {
SEC_CALG_SM4 = 0x3,
};
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
@@ -34,6 +46,12 @@ enum sec_bd_type {
SEC_BD_TYPE2 = 0x2,
};
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
enum sec_cipher_dir {
SEC_CIPHER_ENC = 0x1,
SEC_CIPHER_DEC = 0x2,
@@ -48,8 +66,8 @@ enum sec_addr_type {
struct sec_sqe_type2 {
/*
- * mac_len: 0~5 bits
- * a_key_len: 6~10 bits
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
* a_alg: 11~16 bits
*/
__le32 mac_key_alg;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 74f0654028c9..2bbaf1e2dae7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -32,6 +32,7 @@
#define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
if (ret)
return -EINVAL;
- if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
}
@@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -608,6 +609,14 @@ static const struct file_operations sec_dbg_fops = {
.write = sec_debug_write,
};
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
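
DEFINE_DEBUGFS_ATTRIBUTE generates the file_operations around the getter above; passing NULL as the set() hook makes the attribute read-only, matching the 0444 mode used below, and reading the resulting file returns the counter formatted with "%lld". A writable variant would simply supply the second hook, sketched here with a hypothetical setter (the generated ops can also be paired with debugfs_create_file_unsafe(), which skips the proxy layer):

static int ex_atomic64_set(void *data, u64 val)
{
	atomic64_set((atomic64_t *)data, val);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ex_atomic64_rw_ops, sec_debugfs_atomic64_get,
			 ex_atomic64_set, "%lld\n");
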
static int sec_core_debug_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -628,9 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 012023c347b1..0e8c7e324fb4 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, ret, sg_n;
+ int i, sg_n, sg_n_mapped;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
- if (sg_n > pool->sge_nr)
+
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ if (!sg_n_mapped)
return ERR_PTR(-EINVAL);
- ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!ret)
+ if (sg_n_mapped > pool->sge_nr) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
+ }
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
@@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
- for_each_sg(sgl, sg, sg_n, i) {
+ for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
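
The reordering above matters because dma_map_sg() may return fewer entries than sg_nents() when an IOMMU coalesces physically contiguous segments, so the pool-capacity check has to run against the mapped count, and the error path must undo the mapping it created. Illustrative (assumed) numbers:

/*
 * Why the check moved after the mapping:
 *
 *   sg_nents(sgl)    == 130    scatterlist entries
 *   dma_map_sg(...)  == 96     after IOMMU coalescing
 *   pool->sge_nr     == 128    hardware SGE slots
 *
 * Checking 130 > 128 up front would have rejected a request the hardware
 * can in fact handle; checking the mapped count (96) accepts it, and the
 * new dma_unmap_sg() on the error path avoids leaking the mapping.
 */
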
@@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 79fc4dd3fe00..bc1db26598bb 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -11,6 +11,10 @@
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 795428c1d07e..9815d5e3ccd0 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -46,10 +46,8 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- struct scatterlist *src;
- struct scatterlist *dst;
- size_t slen;
- size_t dlen;
+ int sskip;
+ int dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen)
+ u32 dlen, int sskip, int dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen;
+ sqe->input_data_length = slen - sskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen;
+ sqe->dest_avail_out = dlen - dskip;
sqe->source_addr_l = lower_32_bits(s_addr);
sqe->source_addr_h = upper_32_bits(s_addr);
sqe->dest_addr_l = lower_32_bits(d_addr);
@@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
- kfree(req->dst);
- else
- kfree(req->src);
-
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
memset(req, 0, sizeof(struct hisi_zip_req));
@@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
}
dlen = sqe->produced;
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
acomp_req->dlen = dlen + head_size;
@@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
}
}
-static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
- size_t remains, struct scatterlist **out)
-{
-#define SPLIT_NUM 2
- size_t split_sizes[SPLIT_NUM];
- int out_mapped_nents[SPLIT_NUM];
-
- split_sizes[0] = bytes;
- split_sizes[1] = remains;
-
- return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
- out_mapped_nents, GFP_KERNEL);
-}
-
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_qp_ctx *qp_ctx,
size_t head_size, bool is_comp)
@@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
struct hisi_zip_req *req_cache;
- struct scatterlist *out[2];
- struct scatterlist *sgl;
- size_t len;
- int ret, req_id;
-
- /*
- * remove/add zlib/gzip head, as hardware operations do not include
- * comp head. so split req->src to get sgl without heads in acomp, or
- * add comp head to req->dst ahead of that hardware output compressed
- * data in sgl splited from req->dst without comp head.
- */
- if (is_comp) {
- sgl = req->dst;
- len = req->dlen - head_size;
- } else {
- sgl = req->src;
- len = req->slen - head_size;
- }
-
- ret = get_sg_skip_bytes(sgl, head_size, len, out);
- if (ret)
- return ERR_PTR(ret);
-
- /* sgl for comp head is useless, so free it now */
- kfree(out[0]);
+ int req_id;
write_lock(&req_q->req_lock);
@@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- kfree(out[1]);
return ERR_PTR(-EBUSY);
}
set_bit(req_id, req_q->req_bitmap);
@@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+
if (is_comp) {
- req_cache->src = req->src;
- req_cache->dst = out[1];
- req_cache->slen = req->slen;
- req_cache->dlen = req->dlen - head_size;
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
} else {
- req_cache->src = out[1];
- req_cache->dst = req->dst;
- req_cache->slen = req->slen - head_size;
- req_cache->dlen = req->dlen;
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
}
write_unlock(&req_q->req_lock);
@@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
+ struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
@@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
dma_addr_t output;
int ret;
- if (!req->src || !req->slen || !req->dst || !req->dlen)
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
return -EINVAL;
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
if (IS_ERR(req->hw_src))
return PTR_ERR(req->hw_src);
req->dma_src = input;
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
&output);
if (IS_ERR(req->hw_dst)) {
@@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
- req->dlen);
+ hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ a_req->dlen, req->sskip, req->dskip);
hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
hisi_zip_config_tag(zip_sqe, req->req_id);
@@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
return ret;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..25d5227f74a1 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
struct dma_slave_config dma_conf;
int err = -EINVAL;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
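
dma_request_slave_channel() returned NULL on any failure, forcing the driver to guess an errno (-EBUSY) and losing -EPROBE_DEFER; dma_request_chan() returns an ERR_PTR that can be propagated directly. A minimal sketch of the pattern:

#include <linux/dmaengine.h>

static int ex_dma_init(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan)) {
		/* Keep -EPROBE_DEFER quiet so probing can be retried. */
		if (PTR_ERR(*chan) != -EPROBE_DEFER)
			dev_err(dev, "couldn't acquire a slave DMA channel\n");
		return PTR_ERR(*chan);
	}
	return 0;
}
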
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 64894d8b442a..2cb53fbae841 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
- priv->config.cd_size,
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
@@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
+ struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
+ &dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
- cdesc->control_data.refresh = 0;
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
/* Prepare result descriptor */
@@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
/* res token is behind the descr, but ofs must be rounded to buswdth */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
@@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
+ priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
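
safexcel_configure() rounds every descriptor offset up to the host bus width; with the new cdsh_offset the shadow command-descriptor (token) area apparently gets the same treatment. The rounding idiom, worked through with an assumed 4-dword bus (mask = 3):

/* (size + mask) & ~mask rounds up to the next multiple of mask + 1:
 *
 *   cd_size = 10 dwords, mask = 3
 *   cd_offset = (10 + 3) & ~3 = 12 dwords
 *   cd_offset *= sizeof(u32)  -> 48 bytes between command descriptors
 */
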
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b4624b5687ce..94016c505abb 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -40,7 +40,8 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 19
+#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS 16
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
@@ -207,6 +208,7 @@
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
+#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
@@ -277,9 +279,9 @@
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
/* EIP197_HIA_DFE/DSE_THR_CTRL */
#define EIP197_DxE_THR_CTRL_EN BIT(30)
@@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
{
token->opcode = EIP197_TOKEN_OPCODE_NOOP;
token->packet_length = BIT(2);
+ token->stat = 0;
+ token->instructions = 0;
}
/* Instructions */
@@ -574,14 +578,13 @@ struct safexcel_control_data_desc {
u16 application_id;
u16 rsvd;
- u8 refresh:2;
- u32 context_lo:30;
+ u32 context_lo;
u32 context_hi;
u32 control0;
u32 control1;
- u32 token[EIP197_MAX_TOKENS];
+ u32 token[EIP197_EMB_TOKENS];
} __packed;
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
@@ -591,7 +594,10 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+#define EIP197_TYPE_BCLA 0x0
#define EIP197_TYPE_EXTENDED 0x3
+#define EIP197_CONTEXT_SMALL 0x2
+#define EIP197_CONTEXT_SIZE_MASK 0x3
/* Basic Command Descriptor format */
struct safexcel_command_desc {
@@ -599,13 +605,16 @@ struct safexcel_command_desc {
u8 rsvd0:5;
u8 last_seg:1;
u8 first_seg:1;
- u16 additional_cdata_size:8;
+ u8 additional_cdata_size:8;
u32 rsvd1;
u32 data_lo;
u32 data_hi;
+ u32 atok_lo;
+ u32 atok_hi;
+
struct safexcel_control_data_desc control_data;
} __packed;
@@ -629,15 +638,20 @@ enum eip197_fw {
struct safexcel_desc_ring {
void *base;
+ void *shbase;
void *base_end;
+ void *shbase_end;
dma_addr_t base_dma;
+ dma_addr_t shbase_dma;
/* write and read pointers */
void *write;
+ void *shwrite;
void *read;
/* descriptor element offset */
- unsigned offset;
+ unsigned int offset;
+ unsigned int shoffset;
};
enum safexcel_alg_type {
@@ -652,6 +666,7 @@ struct safexcel_config {
u32 cd_size;
u32 cd_offset;
+ u32 cdsh_offset;
u32 rd_size;
u32 rd_offset;
@@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 len,
u32 full_data_len,
- dma_addr_t context);
+ dma_addr_t context,
+ struct safexcel_token **atoken);
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c02995694b41..0c5e80c3f6e3 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -47,8 +47,12 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
- char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aead; /* !=0=AEAD, 2=IPsec ESP AEAD, 3=IPsec ESP GMAC */
+ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aadskip;
+ u8 blocksz;
+ u32 ivmask;
+ u32 ctrinit;
__le32 key[16];
u32 nonce;
@@ -72,251 +76,298 @@ struct safexcel_cipher_req {
int nr_src, nr_dst;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
- struct safexcel_command_desc *cdesc)
+static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
{
- u32 block_sz = 0;
-
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
- ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
-
- if (ctx->alg == SAFEXCEL_CHACHA20 ||
- ctx->xcm == EIP197_XCM_MODE_CCM) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] =
- (__force u32)cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
- (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* 96 bit IV part */
- memcpy(&cdesc->control_data.token[0], iv, 12);
-
- if (ctx->alg == SAFEXCEL_CHACHA20) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- *(__be32 *)&cdesc->control_data.token[3] =
- cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return 4;
+ }
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* Variable length IV part */
- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
- /* Start variable length counter at 0 */
- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
- 0, iv[0] + 1);
-
- return;
+ return 4;
}
- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
- switch (ctx->alg) {
- case SAFEXCEL_DES:
- block_sz = DES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_3DES:
- block_sz = DES3_EDE_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_SM4:
- block_sz = SM4_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_AES:
- block_sz = AES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- default:
- break;
- }
- memcpy(cdesc->control_data.token, iv, block_sz);
- }
+ cdesc->control_data.options |= ctx->ivmask;
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ return ctx->blocksz / sizeof(u32);
}
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
+ int ivlen;
- safexcel_cipher_token(ctx, iv, cdesc);
+ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
+ if (ivlen == 4) {
+ /* No space in cdesc, instruction moves to atoken */
+ cdesc->additional_cdata_size = 1;
+ token = atoken;
+ } else {
+ /* Everything fits in cdesc */
+ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
+ /* Need to pad with NOP */
+ eip197_noop_token(&token[1]);
+ }
- /* skip over worst case IV of 4 dwords, no need to be exact */
- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token->packet_length = length;
+ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
+ token->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
- EIP197_TOKEN_STAT_LAST_HASH;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
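Reviewer note: the rewritten skcipher token path above splits on the IV size. A standalone C sketch of the placement rule — a 4-dword IV fills the whole embedded token area, so the single DIRECTION instruction spills to the shadow slot and additional_cdata_size counts it; a shorter IV leaves room at token[2] and the leftover token[3] must be explicitly NOPed. Token encodings are assumed, for illustration only:

#include <stdint.h>

#define NOOP_WORD 0xf0000004u	/* assumed NOP token encoding */

struct cmd_sketch {
	uint32_t emb_token[4];		/* embedded token area in the CD */
	uint8_t additional_cdata_size;	/* shadow dwords consumed */
};

static void place_instruction(struct cmd_sketch *cd, uint32_t *shadow,
			      int iv_dwords, uint32_t insn_word)
{
	if (iv_dwords == 4) {
		shadow[0] = insn_word;		/* CD token area is full */
		cd->additional_cdata_size = 1;
	} else {
		cd->emb_token[2] = insn_word;	/* fits after 2-dword IV */
		cd->emb_token[3] = NOOP_WORD;	/* pad the unused slot */
		cd->additional_cdata_size = 0;
	}
}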
+static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
+{
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 96 bit IV part */
+ memcpy(&cdesc->control_data.token[0], iv, 12);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ /* CBC */
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
- struct safexcel_token *token;
+ struct safexcel_token *aadref;
+ int atoksize = 2; /* Start with minimum size */
+ int assocadj = assoclen - ctx->aadskip, aadalign;
- safexcel_cipher_token(ctx, iv, cdesc);
+ /* Always 4 dwords of embedded IV for AEAD modes */
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- if (direction == SAFEXCEL_ENCRYPT) {
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- } else {
+ if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 15);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
-
- token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[14].packet_length = digestsize |
- EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
-
- if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
- /* For ESP mode (and not GMAC), skip over the IV */
- token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
-
- assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
- }
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
+ u8 *cbcmaciv = (u8 *)&atoken[1];
+ __le32 *aadlen = (__le32 *)&atoken[5];
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* Length + nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* Fixup flags byte */
+ *(__le32 *)cbcmaciv =
+ cpu_to_le32(ctx->nonce |
+ ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2));
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ memcpy(cbcmaciv + 4, iv, 8);
+ /* Start counter at 0 */
+ cdesc->control_data.token[3] = 0;
+ /* Message length */
+ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
+ } else {
+ /* Variable length IV part */
+ memcpy(final_iv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, iv, 15 - iv[0]);
+ /* Start variable length counter at 0 */
+ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
+ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ /* fixup flags byte */
+ cbcmaciv[0] |= ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2);
+ /* insert lower 2 bytes of message length */
+ cbcmaciv[14] = cryptlen >> 8;
+ cbcmaciv[15] = cryptlen & 255;
+ }
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE +
+ ((assocadj > 0) << 1);
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(assocadj)) {
+ *aadlen = cpu_to_le32((assocadj >> 8) |
+ (assocadj & 255) << 8);
+ atoken += 6;
+ atoksize += 7;
+ } else {
+ atoken += 5;
+ atoksize += 6;
+ }
- if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
- token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[11].packet_length = cryptlen;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- /* Do not send to crypt engine in case of GMAC */
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ atoken++;
+
+ /* For CCM only, align AAD data towards hash engine */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ aadalign = (assocadj + 2) & 15;
+ atoken->packet_length = assocadj && aadalign ?
+ 16 - aadalign :
+ 0;
+ if (likely(cryptlen)) {
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ } else {
+ safexcel_aead_iv(ctx, iv, cdesc);
+
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
+ atoken++;
- if (!ctx->xcm)
- return;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
+ direction == SAFEXCEL_DECRYPT)) {
+ /* Poly-chacha decryption needs a dummy NOP here ... */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16; /* According to Op Manual */
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ }
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[9].packet_length = 0;
- token[9].instructions = AES_BLOCK_SIZE;
+ if (ctx->xcm) {
+ /* For GCM and CCM, obtain enc(Y0) */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ atoken->packet_length = 0;
+ atoken->stat = 0;
+ atoken->instructions = AES_BLOCK_SIZE;
+ atoken++;
+
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
+ atoken++;
+ atoksize += 2;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[10].packet_length = AES_BLOCK_SIZE;
- token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ /* Fixup stat field for AAD direction instruction */
+ aadref->stat = 0;
- if (ctx->xcm != EIP197_XCM_MODE_GCM) {
- u8 *final_iv = (u8 *)cdesc->control_data.token;
- u8 *cbcmaciv = (u8 *)&token[1];
- __le32 *aadlen = (__le32 *)&token[5];
+ /* Process crypto data */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = cryptlen;
- /* Construct IV block B0 for the CBC-MAC */
- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[0].packet_length = AES_BLOCK_SIZE +
- ((assoclen > 0) << 1);
- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
- EIP197_TOKEN_INS_TYPE_HASH;
- /* Variable length IV part */
- memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
- /* fixup flags byte */
- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
- /* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
- /* insert lower 2 bytes of message length */
- cbcmaciv[14] = cryptlen >> 8;
- cbcmaciv[15] = cryptlen & 255;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ /* Fixup instruction field for AAD dir instruction */
+ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
- if (assoclen) {
- *aadlen = cpu_to_le32((assoclen >> 8) |
- ((assoclen & 0xff) << 8));
- assoclen += 2;
+ /* Do not send to crypt engine in case of GMAC */
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align AAD data towards hash engine */
- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
- assoclen &= 15;
- token[7].packet_length = assoclen ? 16 - assoclen : 0;
-
- if (likely(cryptlen)) {
- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align crypto data towards hash engine */
- token[11].stat = 0;
-
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- cryptlen &= 15;
- token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ cryptlen &= 15;
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
+ atoken->stat = 0;
+ /* For CCM only, pad crypto data to the hash engine */
+ atoken++;
+ atoksize++;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16 - cryptlen;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[7].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
+ atoken++;
+ atoksize++;
}
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ /* Append ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ /* Extract ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ atoken++;
+ atoksize++;
+
+ /* Verify ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ atoken->packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+
+ /* Fixup length of the token in the command descriptor */
+ cdesc->additional_cdata_size = atoksize;
}
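Reviewer note: the CCM branch above builds the B0 block for the CBC-MAC by hand inside the token area. For comparison, a standalone sketch of the textbook B0 construction from RFC 3610; note that ((M - 2) / 2) << 3 equals (M - 2) << 2 for the even tag sizes CCM permits, which is the shortcut the driver's flags-byte fixup takes:

#include <stdint.h>
#include <string.h>

static void ccm_build_b0(uint8_t b0[16], const uint8_t *nonce,
			 unsigned int noncelen,	/* 15 - L, i.e. 7..13 */
			 unsigned int taglen,	/* M: even, 4..16 */
			 int have_aad, uint64_t msglen)
{
	unsigned int L = 15 - noncelen;
	unsigned int i;

	b0[0] = (uint8_t)(((have_aad ? 1 : 0) << 6) |
			  (((taglen - 2) / 2) << 3) |
			  (L - 1));
	memcpy(&b0[1], nonce, noncelen);
	for (i = 0; i < L; i++)		/* big-endian message length */
		b0[15 - i] = (uint8_t)(msglen >> (8 * i));
}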
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
@@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
@@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
@@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
}
- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_RES_MASK);
-
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
@@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-badkey_expflags:
memzero_explicit(&keys, sizeof(keys));
return err;
}
@@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
@@ -730,56 +775,60 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- /* The EIP cannot deal with zero length input packets! */
- if (totlen == 0)
- totlen = 1;
+ if (!totlen) {
+ /*
+ * The EIP97 cannot deal with zero length input packets!
+ * So stuff a dummy command descriptor indicating a 1 byte
+ * (dummy) input packet, using the context record as source.
+ */
+ first_cdesc = safexcel_add_cdesc(priv, ring,
+ 1, 1, ctx->base.ctxr_dma,
+ 1, 1, ctx->base.ctxr_dma,
+ &atoken);
+ if (IS_ERR(first_cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(first_cdesc);
+ goto cdesc_rollback;
+ }
+ n_cdesc = 1;
+ goto skip_cdesc;
+ }
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - len < 0)
+ if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma);
+ ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
- n_cdesc++;
- if (n_cdesc == 1) {
+ if (!n_cdesc)
first_cdesc = cdesc;
- }
+ n_cdesc++;
queued -= len;
if (!queued)
break;
}
-
- if (unlikely(!n_cdesc)) {
- /*
- * Special case: zero length input buffer.
- * The engine always needs the 1st command descriptor, however!
- */
- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
- ctx->base.ctxr_dma);
- n_cdesc = 1;
- }
-
+skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
- safexcel_aead_token(ctx, iv, first_cdesc,
+ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
- safexcel_skcipher_token(ctx, iv, first_cdesc,
+ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
/* result descriptors */
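Reviewer note: the zero-length rework above replaces the old "pretend totlen is 1" hack with an explicit dummy descriptor whose DMA source is the context record — a buffer that is guaranteed mapped and at least one byte long. A standalone model of the pattern, names illustrative:

#include <stdint.h>

struct seg_desc { uint64_t src_dma; uint32_t len; uint32_t total; };

static struct seg_desc first_segment(uint64_t data_dma, uint32_t len,
				     uint32_t total, uint64_t ctx_dma)
{
	struct seg_desc d;

	if (!total) {
		/* engine hangs on 0-byte packets: substitute one dummy
		 * byte read from the context record, which is already
		 * coherent-mapped and never shorter than a byte.
		 */
		d.src_dma = ctx_dma;
		d.len = 1;
		d.total = 1;
	} else {
		d.src_dma = data_dma;
		d.len = len;
		d.total = total;
	}
	return d;
}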
@@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
return 0;
}
@@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
@@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
@@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->hkaes))
- return PTR_ERR(ctx->hkaes);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->hkaes);
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
@@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+ ctx->ctrinit = 0;
return 0;
}
@@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- if (len != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
@@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
- if (len != CHACHA_KEY_SIZE) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
@@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- if (len != SM4_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != SM4_KEY_SIZE)
return -EINVAL;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
@@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
@@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2134daef24f6..43962bc709c6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
input_length &= 15;
if (unlikely(cbcmac && input_length)) {
+ token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ eip197_noop_token(&token[3]);
}
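Reviewer note: these hash-token hunks exist because the whole-descriptor memset is dropped later in this patch (see the safexcel_ring.c changes): every unused token slot now carries whatever the previous ring pass left behind unless it is explicitly NOPed, and stat bits must be cleared by hand. A standalone sketch of the padding rule, encodings assumed:

#include <stdint.h>

#define TOKEN_OPCODE_NOOP 0xfu	/* assumed encoding, illustration only */

struct token { uint8_t opcode; uint8_t stat; uint16_t len; };

static void noop_token(struct token *t)
{
	t->opcode = TOKEN_OPCODE_NOOP;
	t->len = 4;	/* nonzero length, mirroring BIT(2) in the driver */
	t->stat = 0;	/* clear stale LAST_HASH/LAST_PACKET bits */
}

static void pad_tokens(struct token *tok, int used, int total)
{
	int i;

	for (i = used; i < total; i++)	/* e.g. pad_tokens(tok, 3, 4) */
		noop_token(&tok[i]);
}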
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
@@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
+ cdesc->control_data.control1 = 0;
/*
* Copy the input digest if needed, and setup the context
@@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE)
+ ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
@@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
+ struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
@@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma,
+ &dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
@@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- if (keylen != sizeof(u32)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
memcpy(ctx->ipad, key, sizeof(u32));
return 0;
@@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
@@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
/* precompute the XCBC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_cipher_setkey(ctx->kaes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
for (i = 0; i < len / sizeof(u32); i++)
ctx->ipad[i + 8] =
@@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 9237ba745c2f..e454c3d44f07 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
+ int i;
+ struct safexcel_command_desc *cdesc;
+ dma_addr_t atok;
+
+ /* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
+ /* Command descriptor shadow ring for storing additional token data */
+ cdr->shoffset = priv->config.cdsh_offset;
+ cdr->shbase = dmam_alloc_coherent(priv->dev,
+ cdr->shoffset *
+ EIP197_DEFAULT_RING_SIZE,
+ &cdr->shbase_dma, GFP_KERNEL);
+ if (!cdr->shbase)
+ return -ENOMEM;
+ cdr->shwrite = cdr->shbase;
+ cdr->shbase_end = cdr->shbase + cdr->shoffset *
+ (EIP197_DEFAULT_RING_SIZE - 1);
+
+ /*
+ * Populate command descriptors with physical pointers to shadow descs.
+ * Note that we only need to do this once if we don't overwrite them.
+ */
+ cdesc = cdr->base;
+ atok = cdr->shbase_dma;
+ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
+ cdesc->atok_lo = lower_32_bits(atok);
+ cdesc->atok_hi = upper_32_bits(atok);
+ cdesc = (void *)cdesc + cdr->offset;
+ atok += cdr->shoffset;
+ }
+
rdr->offset = priv->config.rd_offset;
+ /* Use shoffset for result token offset here */
+ rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
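Reviewer note: the loop above wires each command descriptor to its shadow slot exactly once, at init time, so the hot path never touches atok_lo/atok_hi again. A standalone model of that one-time wiring, with faked bus addresses:

#include <stdint.h>
#include <stddef.h>

struct cmd_slot { uint32_t atok_lo, atok_hi; /* + rest of descriptor */ };

static void wire_shadow_ring(struct cmd_slot *cdr, uint64_t shadow_dma,
			     size_t shadow_stride, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++) {
		cdr[i].atok_lo = (uint32_t)shadow_dma;
		cdr[i].atok_hi = (uint32_t)(shadow_dma >> 32);
		shadow_dma += shadow_stride;	/* next shadow slot */
	}
}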
@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_desc_ring *ring)
+static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ bool first,
+ struct safexcel_token **atoken)
{
void *ptr = ring->write;
+ if (first)
+ *atoken = ring->shwrite;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end) {
+ ring->write = ring->base;
+ ring->shwrite = ring->shbase;
+ } else {
+ ring->write += ring->offset;
+ ring->shwrite += ring->shoffset;
+ }
+
+ return ptr;
+}
+
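Reviewer note: the full test above deliberately leaves one slot unused, so write == read can only ever mean "empty"; this is also why base_end is computed with (EIP197_DEFAULT_RING_SIZE - 1) during init. A standalone model of the check:

#include <stdbool.h>
#include <stddef.h>

/* write/read are byte offsets into the ring; 'end' is the offset of
 * the last slot, stride * (nslots - 1). With unsigned arithmetic,
 * read - stride wraps when read == 0, making the first clause false;
 * the second clause covers exactly that wrap-around case.
 */
static bool ring_full(size_t write, size_t read, size_t stride, size_t end)
{
	return (write == read - stride) ||
	       (read == 0 && write == end);
}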
+static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ struct result_data_desc **rtoken)
+{
+ void *ptr = ring->write;
+
+ /* Result token at relative offset shoffset */
+ *rtoken = ring->write + ring->shoffset;
+
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
if (ring->write == ring->read)
return;
- if (ring->write == ring->base)
+ if (ring->write == ring->base) {
ring->write = ring->base_end;
- else
+ ring->shwrite = ring->shbase_end;
+ } else {
ring->write -= ring->offset;
+ ring->shwrite -= ring->shoffset;
+ }
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
- dma_addr_t context) {
+ dma_addr_t context,
+ struct safexcel_token **atoken)
+{
struct safexcel_command_desc *cdesc;
- int i;
- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
+ first, atoken);
if (IS_ERR(cdesc))
return cdesc;
- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
-
- cdesc->first_seg = first;
- cdesc->last_seg = last;
cdesc->particle_size = data_len;
+ cdesc->rsvd0 = 0;
+ cdesc->last_seg = last;
+ cdesc->first_seg = first;
+ cdesc->additional_cdata_size = 0;
+ cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
- if (first && context) {
- struct safexcel_token *token =
- (struct safexcel_token *)cdesc->control_data.token;
-
+ if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
- EIP197_OPTION_CTX_CTRL_IN_CMD;
- cdesc->control_data.context_lo =
- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+ EIP197_OPTION_CTX_CTRL_IN_CMD |
+ EIP197_OPTION_RC_AUTO;
+ cdesc->control_data.type = EIP197_TYPE_BCLA;
+ cdesc->control_data.context_lo = lower_32_bits(context) |
+ EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
-
- if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197D_MRVL)
- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
-
- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
- cdesc->control_data.refresh = 2;
-
- for (i = 0; i < EIP197_MAX_TOKENS; i++)
- eip197_noop_token(&token[i]);
}
return cdesc;
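Reviewer note: context_lo changes representation here. The old code stored the bus address pre-shifted by two; the new code keeps the full low word and tags its two spare LSBs with a context-size code (EIP197_CONTEXT_SMALL), which relies on the context record being at least 4-byte aligned. A standalone sketch of the tagging, with the alignment premise asserted:

#include <assert.h>
#include <stdint.h>

#define CONTEXT_SMALL     0x2u
#define CONTEXT_SIZE_MASK 0x3u

static uint32_t pack_context_lo(uint64_t ctx_dma)
{
	assert((ctx_dma & CONTEXT_SIZE_MASK) == 0); /* alignment premise */
	return (uint32_t)ctx_dma | CONTEXT_SMALL;
}

static uint64_t unpack_context(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | (lo & ~CONTEXT_SIZE_MASK);
}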
@@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
+ struct result_data_desc *rtoken;
- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
+ &rtoken);
if (IS_ERR(rdesc))
return rdesc;
- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
-
- rdesc->first_seg = first;
+ rdesc->particle_size = len;
+ rdesc->rsvd0 = 0;
+ rdesc->descriptor_overflow = 0;
+ rdesc->buffer_overflow = 0;
rdesc->last_seg = last;
+ rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
- rdesc->particle_size = len;
+ rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
+ /* Clear length & error code in result token */
+ rtoken->packet_length = 0;
+ rtoken->error_code = 0;
+
return rdesc;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 391e3b4df364..ad73fc946682 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
@@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
case 24: keylen_cfg = MOD_AES192; break;
case 32: keylen_cfg = MOD_AES256; break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
- crypto_des_verify_key(tfm, key);
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
@@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1096,7 +1086,6 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_maxauthsize(tfm);
int ret;
@@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- goto out;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
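Reviewer note: every CRYPTO_TFM_RES_* removal in this series follows the same core API change — the crypto layer no longer consults result flags on the tfm, so ->setkey() reports bad keys purely through its return value. A condensed kernel-style sketch of the resulting convention (hypothetical driver, error handling trimmed):

/* aes_expandkey() already returns -EINVAL for bad lengths, so no
 * CRYPTO_TFM_RES_BAD_KEY_LEN flag is set anywhere anymore.
 */
static int example_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;	/* the errno is the whole contract now */

	/* ... program the expanded key into the hardware context ... */

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}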
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d8e8c857770c..c24f34a48cef 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int i;
ret = aes_expandkey(&ctx->aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90880a81c534..78d660d963e2 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
break;
default:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f438b425c655..435ac1c83df9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
/*
* AES 128 is supported by the hardware, store key into temporary
@@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
+ return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 63bd565048f4..f5c468f2cc82 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9bbedbccfadf..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
struct aead_request *req = dd->aead_req;
- dd->flags &= ~FLAGS_BUSY;
dd->in_sg = NULL;
dd->out_sg = NULL;
- req->base.complete(&req->base, ret);
+ crypto_finalize_aead_request(dd->engine, req, ret);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
}
omap_aes_gcm_finish_req(dd, ret);
- omap_aes_gcm_handle_queue(dd, NULL);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_ASSOC_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
if (cryptlen) {
tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+ if (nsg)
+ sg_unmark_end(dd->in_sgl);
+
ret = omap_crypto_align_sg(&tmp, cryptlen,
AES_BLOCK_SIZE, &dd->in_sgl[nsg],
OMAP_CRYPTO_COPY_DATA |
@@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_IN_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
dd->in_sg = dd->in_sgl;
@@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->out_sg = req->dst;
dd->orig_out = req->dst;
- dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+ dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
flags = 0;
if (req->src == req->dst || dd->out_sg == sg_arr)
flags |= OMAP_CRYPTO_FORCE_COPY;
- ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
- AES_BLOCK_SIZE, &dd->out_sgl,
- flags,
- FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
- if (ret)
- return ret;
+ if (cryptlen) {
+ ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+ AES_BLOCK_SIZE, &dd->out_sgl,
+ flags,
+ FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+ if (ret)
+ return ret;
+ }
dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
@@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
return 0;
}
-static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
-{
- struct omap_aes_gcm_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
- struct scatterlist iv_sg, tag_sg;
- struct skcipher_request *sk_req;
- struct omap_aes_gcm_result result;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- int ret = 0;
-
- sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
- if (!sk_req) {
- pr_err("skcipher: Failed to allocate request\n");
- return -ENOMEM;
- }
-
- init_completion(&result.completion);
-
- sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
- sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
- skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- omap_aes_gcm_complete, &result);
- ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
- skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
- NULL);
- ret = crypto_skcipher_encrypt(sk_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret) {
- ret = result.err;
- if (!ret) {
- reinit_completion(&result.completion);
- break;
- }
- }
- /* fall through */
- default:
- pr_err("Encryption of IV failed for GCM mode\n");
- break;
- }
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- skcipher_request_free(sk_req);
- return ret;
+ aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
+ return 0;
}
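Reviewer note: do_encrypt_iv() only ever computes one block, E(K, Y0) for the GCM tag, so the async skcipher round trip (request allocation, completion, wait) collapses into the synchronous AES library call; the key schedule moves into struct crypto_aes_ctx at setkey time, as the hunks below show. A minimal kernel-style sketch of the pattern, assuming <crypto/aes.h>:

#include <crypto/aes.h>

struct gcm_ctx_sketch {
	struct crypto_aes_ctx actx;	/* library key schedule */
};

/* setkey time: expand once, reuse for every request */
static int sketch_setkey(struct gcm_ctx_sketch *ctx,
			 const u8 *key, unsigned int keylen)
{
	return aes_expandkey(&ctx->actx, key, keylen);
}

/* request time: one synchronous block, no skcipher round trip */
static void sketch_encrypt_y0(struct gcm_ctx_sketch *ctx,
			      u8 tag[AES_BLOCK_SIZE],
			      const u8 y0[AES_BLOCK_SIZE])
{
	aes_encrypt(&ctx->actx, tag, y0);	/* (ctx, out, in) */
}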
void omap_aes_gcm_dma_out_callback(void *data)
@@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
struct aead_request *req)
{
- struct omap_aes_ctx *ctx;
- struct aead_request *backlog;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = aead_enqueue_request(&dd->aead_queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
-
- backlog = aead_get_backlog(&dd->aead_queue);
- req = aead_dequeue_request(&dd->aead_queue);
- if (req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!req)
- return ret;
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
- if (backlog)
- backlog->base.complete(&backlog->base, -EINPROGRESS);
+ return 0;
+}
- ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
- dd->ctx = ctx;
- rctx->dd = dd;
dd->aead_req = req;
rctx->mode &= FLAGS_MODE_MASK;
@@ -286,16 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
if (err)
return err;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
+ dd->ctx = &ctx->octx;
- if (err) {
- omap_aes_gcm_finish_req(dd, err);
- omap_aes_gcm_handle_queue(dd, NULL);
- }
-
- return ret;
+ return omap_aes_write_ctrl(dd);
}
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
+ memcpy(ctx->octx.key, key, keylen);
+ ctx->octx.keylen = keylen;
return 0;
}
@@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (keylen < 4)
return -EINVAL;
-
keylen -= 4;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
- memcpy(ctx->key, key, keylen);
- memcpy(ctx->nonce, key + keylen, 4);
- ctx->keylen = keylen;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->octx.key, key, keylen);
+ memcpy(ctx->octx.nonce, key + keylen, 4);
+ ctx->octx.keylen = keylen;
+
+ return 0;
+}
+
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ int ret = 0;
+
+ if (!dd)
+ return -ENODEV;
+
+ if (dd->in_sg_len)
+ ret = omap_aes_crypt_dma_start(dd);
+ else
+ omap_aes_gcm_dma_out_callback(dd);
+
+ return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
}
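
Note on the hunks above: the driver-private AEAD queue (FLAGS_BUSY, aead_enqueue_request() and friends) is replaced by the shared crypto_engine, and the asynchronous ecb(aes) skcipher used to encrypt the IV is replaced by a direct call to the AES library's aes_encrypt(), which cannot fail and needs no completion plumbing. Requests now enter through crypto_transfer_aead_request_to_engine(), and the engine drives the prepare_request/do_one_request hooks registered in omap_aes_gcm_cra_init(). The completion side is not visible in this hunk; as a sketch of the contract it must follow (the body below is an assumption for illustration, not the committed omap_aes_gcm_finish_req()):

static void sketch_gcm_finish(struct omap_aes_dev *dd, int err)
{
	struct aead_request *req = dd->aead_req;

	dd->aead_req = NULL;
	/* Hand the request back to the engine; it completes the
	 * request and dispatches the next one from its queue. */
	crypto_finalize_aead_request(dd->engine, req, err);
}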
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a1fc03ed01f3..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
@@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct omap_aes_dev *dd;
int ret;
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
@@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
static struct skcipher_alg algs_ecb_cbc[] = {
@@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = {
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct omap_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
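
Two behavioural notes on the omap-aes.c hunks: the DMA completion callback now attaches to whichever descriptor finishes last (the OUT descriptor when there is output data, otherwise the IN descriptor, as in the pure-AAD GCM case), and for CBC/CTR the driver now reads the chaining value back from AES_REG_IV so that req->iv carries the next IV on completion, as the crypto self-tests expect. A software equivalent of that rule for CBC encryption (a sketch; it assumes len is a non-zero multiple of AES_BLOCK_SIZE, which the new cryptlen check guarantees for CBC):

static void sketch_cbc_output_iv(u8 *iv, const u8 *ct, unsigned int len)
{
	/* After CBC encryption the chaining value is simply the last
	 * ciphertext block; the register readback in
	 * omap_aes_copy_ivout() yields the same value (and, for CTR,
	 * the incremented counter) without touching the data again. */
	memcpy(iv, ct + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}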
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d3575231e31..2d111bf906e1 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -9,6 +9,7 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/aes.h>
#include <crypto/engine.h>
#define DST_MAXBURST 4
@@ -79,7 +80,6 @@
#define FLAGS_INIT BIT(5)
#define FLAGS_FAST BIT(6)
-#define FLAGS_BUSY BIT(7)
#define FLAGS_IN_DATA_ST_SHIFT 8
#define FLAGS_OUT_DATA_ST_SHIFT 10
@@ -98,7 +98,11 @@ struct omap_aes_ctx {
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
struct crypto_sync_skcipher *fallback;
- struct crypto_skcipher *ctr;
+};
+
+struct omap_aes_gcm_ctx {
+ struct omap_aes_ctx octx;
+ struct crypto_aes_ctx actx;
};
struct omap_aes_reqctx {
@@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int omap_aes_gcm_encrypt(struct aead_request *req);
int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
int omap_aes_4106gcm_encrypt(struct aead_request *req);
int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize);
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
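
The new omap_aes_gcm_ctx embeds the generic context as its first member and adds a software crypto_aes_ctx (hence the <crypto/aes.h> include) for the synchronous IV encryption above. Because octx sits at offset zero, the same crypto_aead_ctx() pointer serves both views; an illustrative accessor (hypothetical, not in the patch):

static struct omap_aes_ctx *sketch_gcm_to_octx(struct crypto_aead *tfm)
{
	struct omap_aes_gcm_ctx *gctx = crypto_aead_ctx(tfm);

	/* Safe because octx is the first member, so gctx and
	 * &gctx->octx share an address. */
	return &gctx->octx;
}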
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 7d592d93bb1c..cc88b7362bc2 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+static void omap_crypto_copy_data(struct scatterlist *src,
+ struct scatterlist *dst,
+ int offset, int len)
+{
+ int amt;
+ void *srcb, *dstb;
+ int srco = 0, dsto = offset;
+
+ while (src && dst && len) {
+ if (srco >= src->length) {
+ srco -= src->length;
+ src = sg_next(src);
+ continue;
+ }
+
+ if (dsto >= dst->length) {
+ dsto -= dst->length;
+ dst = sg_next(dst);
+ continue;
+ }
+
+ amt = min(src->length - srco, dst->length - dsto);
+ amt = min(len, amt);
+
+ srcb = sg_virt(src) + srco;
+ dstb = sg_virt(dst) + dsto;
+
+ memcpy(dstb, srcb, amt);
+
+ srco += amt;
+ dsto += amt;
+ len -= amt;
+ }
+}
+
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
int offset, int len, u8 flags_shift,
unsigned long flags)
@@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
pages = get_order(len);
if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
- scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+ omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
free_pages((unsigned long)buf, pages);
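
scatterwalk_map_and_copy() copies between a scatterlist and a linear buffer, but after this rework the copy-back source in omap_crypto_cleanup() is itself a scatterlist (the possibly bounce-buffered sg), so the patch open-codes an sg-to-sg walk: advance whichever list is exhausted, then copy min-sized spans. A usage sketch with trivial one-entry lists (hypothetical; both sides must be sg_virt()-mappable, as the driver's bounce buffers are):

static void sketch_sg_copy_demo(u8 *src, u8 *dst, int len)
{
	struct scatterlist s, d;

	sg_init_one(&s, src, len);
	sg_init_one(&d, dst, len);
	omap_crypto_copy_data(&s, &d, 0, len);	/* the static helper above */
}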
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 4c4dbc2b377e..8eda43319204 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
static void omap_des_done_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ int i;
pr_debug("enter done_task\n");
@@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data)
omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ for (i = 0; i < 2; i++)
+ ((u32 *)dd->req->iv)[i] =
+ omap_des_read(dd, DES_REG_IV(dd, i));
+
omap_des_finish_req(dd, 0);
pr_debug("exit\n");
@@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ if (!req->cryptlen)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
return -EINVAL;
- }
dd = omap_des_find_dev(ctx);
if (!dd)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac80bc6af093..4f915a4ef5b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,6 +112,8 @@
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
+#define FLAGS_HUGE 11
+
/* context flags */
#define FLAGS_FINUP 16
@@ -136,6 +138,8 @@
#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256
+#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
struct scatterlist *tmp;
int offset = ctx->offset;
+ ctx->total = new_len;
+
if (ctx->bufcnt)
n++;
@@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
tmp = sg_next(tmp);
ctx->sg_len++;
+ new_len -= ctx->bufcnt;
}
while (sg && new_len) {
int len = sg->length - offset;
- if (offset) {
+ if (len <= 0) {
offset -= sg->length;
- if (offset < 0)
- offset = 0;
+ sg = sg_next(sg);
+ continue;
}
if (new_len < len)
@@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
if (len > 0) {
new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
+ offset = 0;
+ ctx->offset = 0;
+ ctx->sg_len++;
if (new_len <= 0)
- sg_mark_end(tmp);
+ break;
tmp = sg_next(tmp);
- ctx->sg_len++;
}
sg = sg_next(sg);
}
+ if (tmp)
+ sg_mark_end(tmp);
+
set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
- struct scatterlist *sg, int bs, int new_len)
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
{
int pages;
void *buf;
- int len;
-
- len = new_len + ctx->bufcnt;
- pages = get_order(ctx->total);
+ pages = get_order(new_len);
buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
if (!buf) {
@@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
- ctx->total - ctx->bufcnt, 0);
+ min(new_len, ctx->total) - ctx->bufcnt, 0);
sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
+ sg_set_buf(ctx->sgl, buf, new_len);
ctx->sg = ctx->sgl;
set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
ctx->sg_len = 1;
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
- ctx->offset = 0;
+ ctx->total = new_len;
return 0;
}
@@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
struct scatterlist *sg_tmp = sg;
int new_len;
int offset = rctx->offset;
+ int bufcnt = rctx->bufcnt;
if (!sg || !sg->length || !nbytes)
return 0;
@@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
else
new_len = (new_len - 1) / bs * bs;
+ if (!new_len)
+ return 0;
+
if (nbytes != new_len)
list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ list_ok = false;
+
+ continue;
+ }
+
#ifdef CONFIG_ZONE_DMA
if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
aligned = false;
@@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
}
}
+ if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+ new_len = OMAP_SHA_MAX_DMA_LEN;
+ aligned = false;
+ }
+
if (!aligned)
return omap_sham_copy_sgs(rctx, sg, bs, new_len);
else if (!list_ok)
return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+ rctx->total = new_len;
+ rctx->offset += new_len;
rctx->sg_len = n;
- rctx->sg = sg;
+ if (rctx->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
return 0;
}
@@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
- int nbytes;
+ unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
- int xmit_len, hash_later;
+ int hash_later;
bs = get_block_size(rctx);
+ nbytes = rctx->bufcnt;
+
if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
+ nbytes += req->nbytes - rctx->offset;
- rctx->total = nbytes + rctx->bufcnt;
+ dev_dbg(rctx->dd->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset,
+ rctx->bufcnt);
- if (!rctx->total)
+ if (!nbytes)
return 0;
- if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
int len = bs - rctx->bufcnt % bs;
- if (len > nbytes)
- len = nbytes;
+ if (len > req->nbytes)
+ len = req->nbytes;
scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
0, len, 0);
rctx->bufcnt += len;
- nbytes -= len;
rctx->offset = len;
}
@@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (ret)
return ret;
- xmit_len = rctx->total;
-
- if (!IS_ALIGNED(xmit_len, bs)) {
- if (final)
- xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
- else
- xmit_len = xmit_len / bs * bs;
- } else if (!final) {
- xmit_len -= bs;
- }
-
- hash_later = rctx->total - xmit_len;
+ hash_later = nbytes - rctx->total;
if (hash_later < 0)
hash_later = 0;
- if (rctx->bufcnt && nbytes) {
- /* have data from previous operation and current */
- sg_init_table(rctx->sgl, 2);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-
- sg_chain(rctx->sgl, 2, req->src);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len++;
- } else if (rctx->bufcnt) {
- /* have buffered data only */
- sg_init_table(rctx->sgl, 1);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len = 1;
- }
-
if (hash_later) {
- int offset = 0;
-
- if (hash_later > req->nbytes) {
- memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later - req->nbytes);
- offset = hash_later - req->nbytes;
- }
-
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer + offset,
- req->src,
- offset + req->nbytes -
- hash_later, hash_later, 0);
- }
+ scatterwalk_map_and_copy(rctx->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
}
- if (!final)
- rctx->total = xmit_len;
+ if (hash_later > rctx->buflen)
+ set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
+ rctx->total = min(nbytes, rctx->total);
return 0;
}
@@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
- bool final = ctx->flags & BIT(FLAGS_FINUP);
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
ctx->total < dd->fallback_sz)
@@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
+ if (dd->flags & BIT(FLAGS_HUGE))
+ return 0;
+
if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
@@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length + ctx->bufcnt));
+ get_order(ctx->sg->length));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
@@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ if (dd->flags & BIT(FLAGS_HUGE)) {
+ dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+ omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS &&
+ (ctx->flags & BIT(FLAGS_FINUP)))
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ omap_sham_final_req(dd);
+ }
+ return;
+ }
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
+ ctx->offset = 0;
+
if (req->base.complete)
req->base.complete(&req->base, err);
}
@@ -1158,7 +1187,7 @@ retry:
/* request has changed - restore hash */
dd->pdata->copy_hash(req, 0);
- if (ctx->op == OP_UPDATE) {
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
err = omap_sham_update_req(dd);
if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
@@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
int err = 0;
+ dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
if (!test_bit(FLAGS_BUSY, &dd->flags)) {
omap_sham_handle_queue(dd, NULL);
return;
@@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
+ sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
+
return 0;
}
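
The omap-sham hunks add chunked processing for very large requests: anything above OMAP_SHA_MAX_DMA_LEN (2 MiB) sets FLAGS_HUGE, rctx->offset records how far into req->src the driver has advanced, and omap_sham_finish_req() re-enters the update/final paths until the flag clears instead of completing the request. A hypothetical helper (not in the patch) stating the slice rule:

static unsigned int sketch_sha_slice(unsigned int total,
				     unsigned int offset)
{
	/* Process at most one DMA-sized slice per pass. */
	return min_t(unsigned int, total - offset, OMAP_SHA_MAX_DMA_LEN);
}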
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c5b60f50e1b5..594d6b1695d5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
- u32 *flags = &tfm->crt_flags;
struct crypto_aes_ctx gen_aes;
int cpu;
- if (key_len % 8) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 8)
return -EINVAL;
- }
/*
* If the hardware is capable of generating the extended key
@@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->cword.encrypt.keygen = 1;
ctx->cword.decrypt.keygen = 1;
- if (aes_expandkey(&gen_aes, in_key, key_len)) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (aes_expandkey(&gen_aes, in_key, key_len))
return -EINVAL;
- }
memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
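
This padlock hunk, like the later picoxcell, qat, qce, rockchip, sahara, stm32, talitos, ux500 and virtio ones, belongs to a tree-wide cleanup: setting CRYPTO_TFM_RES_BAD_KEY_LEN (and copying CRYPTO_TFM_RES_MASK bits back from fallback tfms) is dropped because no caller ever consumed those flags; the -EINVAL return already says everything. From the caller's side (a sketch):

static int sketch_setkey_caller(struct crypto_skcipher *tfm,
				const u8 *key, unsigned int keylen)
{
	/* The return value is now the whole story; there is no
	 * per-tfm result flag left to examine or propagate. */
	return crypto_skcipher_setkey(tfm, key, keylen);
}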
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index ddf1b549fdca..c826abe79e79 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
return padlock_sha256_finup(desc, buf, 0, out);
}
-static int padlock_cra_init(struct crypto_tfm *tfm)
+static int padlock_init_tfm(struct crypto_shash *hash)
{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
struct crypto_shash *fallback_tfm;
- int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
@@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
+ return PTR_ERR(fallback_tfm);
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-
-out:
- return err;
}
-static void padlock_cra_exit(struct crypto_tfm *tfm)
+static void padlock_exit_tfm(struct crypto_shash *hash)
{
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
crypto_free_shash(ctx->fallback);
}
@@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = {
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
@@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
@@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = {
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
@@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
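
padlock-sha moves from the generic cra_init/cra_exit hooks, which receive an untyped crypto_tfm, to the shash-level init_tfm/exit_tfm hooks, which receive the crypto_shash directly. A condensed restatement of the new init path (same logic as the hunk above, with the error print elided):

static int sketch_init_tfm(struct crypto_shash *hash)
{
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
	struct crypto_shash *fb;

	fb = crypto_alloc_shash(crypto_shash_alg_name(hash), 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	ctx->fallback = fb;
	/* Grow the descriptor to hold the fallback's state too. */
	hash->descsize += crypto_shash_descsize(fb);
	return 0;
}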
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 29da449b3e9e..7384e91c8b32 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
- if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len > AES_MAX_KEY_SIZE)
return -EINVAL;
- }
/*
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
@@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
-
if (err)
goto sw_setkey_failed;
}
@@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
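
The picoxcell fix relies on devm actions running in reverse order of registration: initializing the tasklet and registering a tasklet_kill action before devm_request_irq() guarantees that on remove (or probe failure) the IRQ, the only source of tasklet scheduling, is freed before the kill runs, so the tasklet cannot be re-armed afterwards. The probe-time ordering, reduced to a sketch of the hunk above:

static int sketch_probe_order(struct platform_device *pdev,
			      struct spacc_engine *engine)
{
	int ret;

	tasklet_init(&engine->complete, spacc_spacc_complete,
		     (unsigned long)engine);

	/* Registered first, therefore released last: the tasklet is
	 * killed only after the devm-managed IRQ below is gone. */
	ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
			      &engine->complete);
	if (ret)
		return ret;

	/* devm_request_irq(&pdev->dev, ...) follows, as in the hunk */
	return 0;
}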
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 35bca76b640f..833bb1d3a11b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
@@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
- goto bad_key;
+ return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
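
The qce Makefile, common.c and core.c hunks together make the hash and skcipher halves independently selectable: the objects drop out of the link, and the shared code fences both the per-type register programming and the qce_ops[] table behind the new Kconfig symbols (CONFIG_CRYPTO_DEV_QCE_SHA, CONFIG_CRYPTO_DEV_QCE_SKCIPHER). The dispatch shape, reduced to a sketch:

static int sketch_qce_start(u32 type)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return 0;	/* program the cipher registers */
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return 0;	/* program the hash registers */
#endif
	default:
		return -EINVAL;	/* type compiled out or unknown */
	}
}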
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
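
qce_sgtable_add() copies entries from new_sgl into a preallocated sg_table; the new max_ents argument stops the copy from consuming more preallocated slots than the caller reserved, which previously let an oversized req->dst spill into the slot meant for the result-dump entry. The call sequence the skcipher hunk below uses, as a sketch (hypothetical wrapper; allocation flags and error paths simplified):

static int sketch_build_dst(struct qce_cipher_reqctx *rctx,
			    struct scatterlist *payload, int payload_ents,
			    struct scatterlist *result)
{
	struct scatterlist *sg;
	int ret;

	/* payload entries plus exactly one slot for the result buffer */
	ret = sg_alloc_table(&rctx->dst_tbl, payload_ents + 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg = qce_sgtable_add(&rctx->dst_tbl, payload, payload_ents);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	sg = qce_sgtable_add(&rctx->dst_tbl, result, 1);
	return PTR_ERR_OR_ZERO(sg);
}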
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
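
Two fixes meet in the skcipher hunks: the completion callback now copies the hardware's updated counter/IV out of the result dump into rctx->iv, and the XTS key handling adopts the kernel convention that an XTS key is two equal-size AES keys concatenated, so the advertised min/max key sizes double and the per-AES size is keylen / 2. A hypothetical validity check stating that convention (qce supports AES-128 and AES-256 only, matching the switch above):

static bool sketch_xts_keylen_ok(unsigned int keylen)
{
	unsigned int half = keylen / 2;

	return !(keylen & 1) &&
	       (half == AES_KEYSIZE_128 || half == AES_KEYSIZE_256);
}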
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index ca4de4ddfe1f..4a75c8e1fa6c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d4ea2f11ca68..466e30bd529c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
ctx->keylen = keylen;
@@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 1aba9372cd23..4ef3eb11361c 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC
depends on ARCH_STM32
select CRYPTO_HASH
help
- This enables support for the CRC32 hw accelerator which can be found
+ This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_HASH
@@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_SHA256
select CRYPTO_ENGINE
help
- This enables support for the HASH hw accelerator which can be found
+ This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_CRYP
@@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
help
- This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 9e11c3480353..8e92e4ac79f1 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cfc8e0e37bee..167b80eec437 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
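
dma_request_slave_channel() returns NULL on any failure, which forced the stm32-hash driver to guess -EBUSY; dma_request_chan() returns an ERR_PTR carrying the real reason, including -EPROBE_DEFER when the DMA controller is not ready yet. The caller pattern, as a sketch:

static int sketch_get_dma_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "in");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*out = chan;
	return 0;
}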
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d71d65846e47..9c6db7f698c4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc,
return 0;
badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
+ goto out;
err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
if (err)
@@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void talitos_sg_unmap(struct device *dev,
@@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
keylen == AES_KEYSIZE_256)
return skcipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
return -EINVAL;
}
@@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b731895aa241..f56d65c56ccf 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+ AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
config CRYPTO_DEV_UX500_DEBUG
bool "Activate ux500 platform debug-mode for crypto and hash block"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 95fb694a2667..800dfc4d16c4 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
default:
pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 4b71e80951b7..fd045e64972a 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions(
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
- goto bad_key;
+ return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
- goto bad_key;
+ return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
@@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions(
return ret;
}
return 0;
-
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/* Note: kernel crypto API realization */
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index d59e736882f6..9fee1b1532a4 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
u8 tweak[AES_BLOCK_SIZE];
int ret;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
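
The vmx hunk enforces the XTS minimum: the mode is undefined for less than one block, so such requests are rejected outright; anything that is at least one block but not block-aligned still goes to the generic fallback, which implements ciphertext stealing. The routing logic, restated as a sketch (the bool stands in for crypto_simd_usable()):

static int sketch_xts_route(unsigned int cryptlen, bool simd_ok)
{
	if (cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;		/* no full block: invalid */
	if (!simd_ok || cryptlen % XTS_BLOCK_SIZE)
		return 1;		/* fallback path (CTS) */
	return 0;			/* SIMD fast path */
}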
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 35535833b6f7..0b1df12e0f21 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -77,7 +77,7 @@ config DEVFREQ_GOV_PASSIVE
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
@@ -91,6 +91,16 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX8M_DDRC_DEVFREQ
+ tristate "i.MX8M DDRC DEVFREQ Driver"
+ depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
+ adjusting DRAM frequency.
+
config ARM_TEGRA_DEVFREQ
tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
@@ -115,14 +125,15 @@ config ARM_TEGRA20_DEVFREQ
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
- depends on ARCH_ROCKCHIP
+ depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
help
- This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
- It sets the frequency for the memory controller and reads the usage counts
- from hardware.
+ This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory Controller).
+ It sets the frequency for the memory controller and reads the usage counts
+ from hardware.
source "drivers/devfreq/event/Kconfig"
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 338ae8440db6..3eb4d5e6635c 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3dc5fd6065a3..8c31b0f2e28f 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
- * @dev : the devfreq-event device
+ * @edev : the devfreq-event device
*
- * Note that this function remove the registered devfreq-event device.
+ * Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 57f6944d65a6..cceee8bc3c2f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -33,6 +34,7 @@
#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
/*
* devfreq core provides delayed work based load monitoring helper
@@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq)
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
int lev, prev_lev, ret = 0;
- unsigned long cur_time;
+ u64 cur_time;
lockdep_assert_held(&devfreq->lock);
- cur_time = jiffies;
+ cur_time = get_jiffies_64();
/* Immediately exit if previous_freq is not initialized yet. */
if (!devfreq->previous_freq)
@@ -224,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
goto out;
}
- devfreq->time_in_state[prev_lev] +=
- cur_time - devfreq->last_stat_updated;
+ devfreq->stats.time_in_state[prev_lev] +=
+ cur_time - devfreq->stats.last_update;
lev = devfreq_get_freq_level(devfreq, freq);
if (lev < 0) {
@@ -234,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
}
if (lev != prev_lev) {
- devfreq->trans_table[(prev_lev *
- devfreq->profile->max_state) + lev]++;
- devfreq->total_trans++;
+ devfreq->stats.trans_table[
+ (prev_lev * devfreq->profile->max_state) + lev]++;
+ devfreq->stats.total_trans++;
}
out:
- devfreq->last_stat_updated = cur_time;
+ devfreq->stats.last_update = cur_time;
return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
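Moving the bookkeeping from unsigned long jiffies to u64 matters on 32-bit kernels, where jiffies wraps roughly every 49.7 days at HZ=1000 (2^32 / 1000 seconds) and a wrap between two updates would corrupt the time_in_state delta. A sketch of the resulting pattern, as used above:

	u64 now = get_jiffies_64();	/* effectively never wraps */

	devfreq->stats.time_in_state[prev_lev] += now - devfreq->stats.last_update;
	devfreq->stats.last_update = now;

The sysfs side then reports these 64-bit jiffies through jiffies64_to_msecs(), as seen in trans_stat_show() below.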
@@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
msecs_to_jiffies(devfreq->profile->polling_ms));
out_update:
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.last_update = get_jiffies_64();
devfreq->stop_polling = false;
if (devfreq->profile->get_cur_freq &&
@@ -807,28 +809,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
devfreq->profile->max_state,
devfreq->profile->max_state),
GFP_KERNEL);
- if (!devfreq->trans_table) {
+ if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+ devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
devfreq->profile->max_state,
- sizeof(unsigned long),
+ sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
- if (!devfreq->time_in_state) {
+ if (!devfreq->stats.time_in_state) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.total_trans = 0;
+ devfreq->stats.last_update = get_jiffies_64();
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -1259,6 +1262,14 @@ err_out:
}
EXPORT_SYMBOL(devfreq_remove_governor);
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1461,6 +1472,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%lu\n", min_freq);
}
+static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1501,7 +1513,6 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1580,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev,
devfreq->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
- devfreq->trans_table[(i * max_state) + j]);
- len += sprintf(buf + len, "%10u\n",
- jiffies_to_msecs(devfreq->time_in_state[i]));
+ devfreq->stats.trans_table[(i * max_state) + j]);
+
+ len += sprintf(buf + len, "%10llu\n", (u64)
+ jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
}
len += sprintf(buf + len, "Total transition : %u\n",
- devfreq->total_trans);
+ devfreq->stats.total_trans);
return len;
}
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int err, value;
+
+ if (df->profile->max_state == 0)
+ return count;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err || value != 0)
+ return -EINVAL;
+
+ mutex_lock(&df->lock);
+ memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ sizeof(*df->stats.time_in_state)));
+ memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+ df->profile->max_state,
+ df->profile->max_state));
+ df->stats.total_trans = 0;
+ df->stats.last_update = get_jiffies_64();
+ mutex_unlock(&df->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
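With the attribute now RW, userspace can clear the accumulated statistics by writing "0" (for example, echo 0 > /sys/class/devfreq/<device>/trans_stat); any other value is rejected with -EINVAL, and devices with an empty frequency table simply ignore the write.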
static struct attribute *devfreq_attrs[] = {
+ &dev_attr_name.attr,
&dev_attr_governor.attr,
&dev_attr_available_governors.attr,
&dev_attr_cur_freq.attr,
@@ -1605,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = {
};
ATTRIBUTE_GROUPS(devfreq);
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s: seq_file instance to show the summary of devfreq devices
+ * @data: not used
+ *
+ * Show the summary of the devfreq devices via the 'devfreq_summary' debugfs file.
+ * It helps users to get detailed information about the devfreq devices.
+ *
+ * Return 0 always because it only shows information without any data change.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+ struct devfreq *devfreq;
+ struct devfreq *p_devfreq = NULL;
+ unsigned long cur_freq, min_freq, max_freq;
+ unsigned int polling_ms;
+
+ seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
+ "dev_name",
+ "dev",
+ "parent_dev",
+ "governor",
+ "polling_ms",
+ "cur_freq_Hz",
+ "min_freq_Hz",
+ "max_freq_Hz");
+ seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+ "------------------------------",
+ "----------",
+ "----------",
+ "---------------",
+ "----------",
+ "------------",
+ "------------",
+ "------------");
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+ if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+ DEVFREQ_NAME_LEN)) {
+ struct devfreq_passive_data *data = devfreq->data;
+
+ if (data)
+ p_devfreq = data->parent;
+ } else {
+ p_devfreq = NULL;
+ }
+#endif
+
+ mutex_lock(&devfreq->lock);
+ cur_freq = devfreq->previous_freq;
+ get_freq_range(devfreq, &min_freq, &max_freq);
+ polling_ms = devfreq->profile->polling_ms;
+ mutex_unlock(&devfreq->lock);
+
+ seq_printf(s,
+ "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
+ dev_name(devfreq->dev.parent),
+ dev_name(&devfreq->dev),
+ p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+ devfreq->governor_name,
+ polling_ms,
+ cur_freq,
+ min_freq,
+ max_freq);
+ }
+
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
+
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -1621,6 +1736,11 @@ static int __init devfreq_init(void)
}
devfreq_class->dev_groups = devfreq_groups;
+ devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+ debugfs_create_file("devfreq_summary", 0444,
+ devfreq_debugfs, NULL,
+ &devfreq_summary_fops);
+
return 0;
}
subsys_initcall(devfreq_init);
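Once registered, the summary is available at /sys/kernel/debug/devfreq/devfreq_summary and prints one row per devfreq device (parent device, governor, polling interval, and current/min/max frequency) in the column layout built by devfreq_summary_show() above.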
@@ -1814,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res)
/**
* devm_devfreq_register_notifier()
- - Resource-managed devfreq_register_notifier()
+ * - Resource-managed devfreq_register_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
@@ -1850,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
/**
* devm_devfreq_unregister_notifier()
- - Resource-managed devfreq_unregister_notifier()
+ * - Resource-managed devfreq_unregister_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index cef2cf5347ca..878825372f6f 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
select REGMAP_MMIO
@@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
help
@@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
This adds the devfreq-event driver for Rockchip SoC. It provides DFI
(DDR Monitor Module) driver to count ddr load.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 1c565926db9f..ccc531ee6938 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
index 55cc96284a36..2d6f08cfd0c5 100644
--- a/drivers/devfreq/event/exynos-nocp.h
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 85c7a77bf3f0..17ed980d9099 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
*
* Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
@@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(dmc1_1),
};
-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
- if (!strcmp(edev->desc->name, ppmu_events[i].name))
+ if (!strcmp(edev_name, ppmu_events[i].name))
return ppmu_events[i].id;
return -EINVAL;
}
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+ return __exynos_ppmu_find_ppmu_id(edev->desc->name);
+}
+
/*
* The devfreq-event ops structure for PPMU v1.1
*/
@@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- struct devfreq_event_dev edev;
int id;
/* Not all registers take the same value for
* read+write data count.
*/
- edev.desc = &desc[j];
- id = exynos_ppmu_find_ppmu_id(&edev);
+ id = __exynos_ppmu_find_ppmu_id(desc[j].name);
switch (id) {
case PPMU_PMNCNT0:
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 284420047455..97f667d0cbdd 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos_ppmu.h - EXYNOS PPMU header file
+ * exynos_ppmu.h - Exynos PPMU header file
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 5d1042188727..9a88faaf8b27 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_dfi *data;
- struct resource *res;
struct devfreq_event_desc *desc;
struct device_node *np = pdev->dev.of_node, *node;
@@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
@@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(data->regmap_pmu))
return PTR_ERR(data->regmap_pmu);
}
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c832673273a2..8fa8eb541373 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -15,11 +15,10 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#define DEFAULT_SATURATION_RATIO 40
@@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
+ dev_err(dev, "failed to get event from devfreq-event devices\n");
stat->total_time = stat->busy_time = 0;
goto err;
}
@@ -287,52 +287,12 @@ err_clk:
return ret;
}
-static int exynos_bus_probe(struct platform_device *pdev)
+static int exynos_bus_profile_init(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node, *node;
- struct devfreq_dev_profile *profile;
+ struct device *dev = bus->dev;
struct devfreq_simple_ondemand_data *ondemand_data;
- struct devfreq_passive_data *passive_data;
- struct devfreq *parent_devfreq;
- struct exynos_bus *bus;
- int ret, max_state;
- unsigned long min_freq, max_freq;
- bool passive = false;
-
- if (!np) {
- dev_err(dev, "failed to find devicetree node\n");
- return -EINVAL;
- }
-
- bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
- if (!bus)
- return -ENOMEM;
- mutex_init(&bus->lock);
- bus->dev = &pdev->dev;
- platform_set_drvdata(pdev, bus);
-
- profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile)
- return -ENOMEM;
-
- node = of_parse_phandle(dev->of_node, "devfreq", 0);
- if (node) {
- of_node_put(node);
- passive = true;
- } else {
- ret = exynos_bus_parent_parse_of(np, bus);
- if (ret < 0)
- return ret;
- }
-
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- goto err_reg;
-
- if (passive)
- goto passive;
+ int ret;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
profile->exit = exynos_bus_exit;
ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
- if (!ondemand_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!ondemand_data)
+ return -ENOMEM;
+
ondemand_data->upthreshold = 40;
ondemand_data->downdifferential = 5;
@@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev)
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
}
/* Register opp_notifier to catch the change of OPP */
ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
if (ret < 0) {
dev_err(dev, "failed to register opp notifier\n");
- goto err;
+ return ret;
}
/*
@@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev)
ret = exynos_bus_enable_edev(bus);
if (ret < 0) {
dev_err(dev, "failed to enable devfreq-event devices\n");
- goto err;
+ return ret;
}
ret = exynos_bus_set_event(bus);
if (ret < 0) {
dev_err(dev, "failed to set event to devfreq-event devices\n");
- goto err;
+ goto err_edev;
}
- goto out;
-passive:
+ return 0;
+
+err_edev:
+ if (exynos_bus_disable_edev(bus))
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ return ret;
+}
+
+static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
+{
+ struct device *dev = bus->dev;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+
/* Initialize the struct profile and governor data for passive device */
profile->target = exynos_bus_target;
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
- if (IS_ERR(parent_devfreq)) {
- ret = -EPROBE_DEFER;
- goto err;
- }
+ if (IS_ERR(parent_devfreq))
+ return -EPROBE_DEFER;
passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
- if (!passive_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!passive_data)
+ return -ENOMEM;
+
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
@@ -407,11 +376,61 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
+ }
+
+ return 0;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
+ struct devfreq_dev_profile *profile;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
}
-out:
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err_reg;
+
+ if (passive)
+ ret = exynos_bus_profile_init_passive(bus, profile);
+ else
+ ret = exynos_bus_profile_init(bus, profile);
+
+ if (ret < 0)
+ goto err;
+
max_state = bus->devfreq->profile->max_state;
min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
new file mode 100644
index 000000000000..bc82d3653bff
--- /dev/null
+++ b/drivers/devfreq/imx8m-ddrc.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/arm-smccc.h>
+
+#define IMX_SIP_DDR_DVFS 0xc2000004
+
+/* Query available frequencies. */
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
+
+/*
+ * This should be in a 1:1 mapping with devicetree OPPs but
+ * firmware provides additional info.
+ */
+struct imx8m_ddrc_freq {
+ unsigned long rate;
+ unsigned long smcarg;
+ int dram_core_parent_index;
+ int dram_alt_parent_index;
+ int dram_apb_parent_index;
+};
+
+/* Hardware limitation */
+#define IMX8M_DDRC_MAX_FREQ_COUNT 4
+
+/*
+ * i.MX8M DRAM Controller clocks have the following structure (abridged):
+ *
+ * +----------+ |\ +------+
+ * | dram_pll |-------|M| dram_core | |
+ * +----------+ |U|---------->| D |
+ * /--|X| | D |
+ * dram_alt_root | |/ | R |
+ * | | C |
+ * +---------+ | |
+ * |FIX DIV/4| | |
+ * +---------+ | |
+ * composite: | | |
+ * +----------+ | | |
+ * | dram_alt |----/ | |
+ * +----------+ | |
+ * | dram_apb |-------------------->| |
+ * +----------+ +------+
+ *
+ * The dram_pll is used for higher rates and dram_alt is used for lower rates.
+ *
+ * Frequency switching is implemented in TF-A (via SMC call) and can change the
+ * configuration of the clocks, including mux parents. The dram_alt and
+ * dram_apb clocks are "imx composite" and their parent can change too.
+ *
+ * We need to prepare/enable the new mux parents ahead of switching and update
+ * their information afterwards.
+ */
+struct imx8m_ddrc {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+
+ /* For frequency switching: */
+ struct clk *dram_core;
+ struct clk *dram_pll;
+ struct clk *dram_alt;
+ struct clk *dram_apb;
+
+ int freq_count;
+ struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
+};
+
+static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
+ unsigned long rate)
+{
+ struct imx8m_ddrc_freq *freq;
+ int i;
+
+ /*
+ * Firmware reports values in MT/s, so we round down from Hz.
+ * Rounding is extra generous to ensure a match.
+ */
+ rate = DIV_ROUND_CLOSEST(rate, 250000);
+ for (i = 0; i < priv->freq_count; ++i) {
+ freq = &priv->freq_table[i];
+ if (freq->rate == rate ||
+ freq->rate + 1 == rate ||
+ freq->rate - 1 == rate)
+ return freq;
+ }
+
+ return NULL;
+}
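As a worked example of the matching: a 750 MHz OPP gives DIV_ROUND_CLOSEST(750000000, 250000) = 3000, which matches a firmware entry of 3000 MT/s exactly, while the ±1 slack absorbs OPP rates that end up a few hundred kHz off after rounding.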
+
+static void imx8m_ddrc_smc_set_freq(int target_freq)
+{
+ struct arm_smccc_res res;
+ u32 online_cpus = 0;
+ int cpu;
+
+ local_irq_disable();
+
+ for_each_online_cpu(cpu)
+ online_cpus |= (1 << (cpu * 8));
+
+ /* Change the DDR frequency. */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
+ 0, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+}
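The online_cpus mask packs one byte per CPU, setting bit (cpu * 8) for each online core, so with CPUs 0-3 online the mask is 0x01010101. Presumably the TF-A DDR DVFS handler uses this to park or poll the other cores while DRAM is retrained, which is also why interrupts are disabled around the call on the issuing CPU.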
+
+static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);
+
+ return hw ? hw->clk : NULL;
+}
+
+static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct clk *new_dram_core_parent;
+ struct clk *new_dram_alt_parent;
+ struct clk *new_dram_apb_parent;
+ int ret;
+
+ /*
+ * Fetch new parents
+ *
+ * new_dram_alt_parent and new_dram_apb_parent are optional but
+ * new_dram_core_parent is not.
+ */
+ new_dram_core_parent = clk_get_parent_by_index(
+ priv->dram_core, freq->dram_core_parent_index - 1);
+ if (!new_dram_core_parent) {
+ dev_err(dev, "failed to fetch new dram_core parent\n");
+ return -EINVAL;
+ }
+ if (freq->dram_alt_parent_index) {
+ new_dram_alt_parent = clk_get_parent_by_index(
+ priv->dram_alt,
+ freq->dram_alt_parent_index - 1);
+ if (!new_dram_alt_parent) {
+ dev_err(dev, "failed to fetch new dram_alt parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_alt_parent = NULL;
+
+ if (freq->dram_apb_parent_index) {
+ new_dram_apb_parent = clk_get_parent_by_index(
+ priv->dram_apb,
+ freq->dram_apb_parent_index - 1);
+ if (!new_dram_apb_parent) {
+ dev_err(dev, "failed to fetch new dram_apb parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_apb_parent = NULL;
+
+ /* increase reference counts and ensure clks are ON before switch */
+ ret = clk_prepare_enable(new_dram_core_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_core parent: %d\n",
+ ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(new_dram_alt_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_alt parent: %d\n",
+ ret);
+ goto out_disable_core_parent;
+ }
+ ret = clk_prepare_enable(new_dram_apb_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_apb parent: %d\n",
+ ret);
+ goto out_disable_alt_parent;
+ }
+
+ imx8m_ddrc_smc_set_freq(freq->smcarg);
+
+ /* Update the parents in the clk tree after the switch. */
+ ret = clk_set_parent(priv->dram_core, new_dram_core_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_core parent: %d\n", ret);
+ if (new_dram_alt_parent) {
+ ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_alt parent: %d\n",
+ ret);
+ }
+ if (new_dram_apb_parent) {
+ ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_apb parent: %d\n",
+ ret);
+ }
+
+ /*
+ * Explicitly refresh dram PLL rate.
+ *
+ * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be
+ * automatically refreshed when clk_get_rate is called on children.
+ */
+ clk_get_rate(priv->dram_pll);
+
+ /*
+ * clk_set_parent transfers the reference count from the old parent.
+ * Now we drop the extra reference counts taken during the switch.
+ */
+ clk_disable_unprepare(new_dram_apb_parent);
+out_disable_alt_parent:
+ clk_disable_unprepare(new_dram_alt_parent);
+out_disable_core_parent:
+ clk_disable_unprepare(new_dram_core_parent);
+out:
+ return ret;
+}
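The enable-before/disable-after sequence guarantees the incoming parent clocks are prepared and running before firmware retargets the muxes; the clk_set_parent() calls afterwards merely resynchronize the kernel clock tree with what firmware already programmed. The trailing clk_disable_unprepare() calls then drop the temporary references, and since they are no-ops on a NULL clk, the optional alt/apb parents need no special-casing on the success path.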
+
+static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ old_freq = clk_get_rate(priv->dram_core);
+ if (*freq == old_freq)
+ return 0;
+
+ freq_info = imx8m_ddrc_find_freq(priv, *freq);
+ if (!freq_info)
+ return -EINVAL;
+
+ /*
+ * Read back the clk rate to verify switch was correct and so that
+ * we can report it on all error paths.
+ */
+ ret = imx8m_ddrc_set_freq(dev, freq_info);
+
+ new_freq = clk_get_rate(priv->dram_core);
+ if (ret)
+ dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n",
+ *freq, old_freq, ret, new_freq);
+ else if (*freq != new_freq)
+ dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n",
+ *freq, old_freq, new_freq);
+ else
+ dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n",
+ *freq, old_freq);
+
+ return ret;
+}
+
+static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_init_freq_info(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+ int index;
+
+ /* An error here means DDR DVFS API not supported by firmware */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
+ 0, 0, 0, 0, 0, 0, &res);
+ priv->freq_count = res.a0;
+ if (priv->freq_count <= 0 ||
+ priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT)
+ return -ENODEV;
+
+ for (index = 0; index < priv->freq_count; ++index) {
+ struct imx8m_ddrc_freq *freq = &priv->freq_table[index];
+
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
+ index, 0, 0, 0, 0, 0, &res);
+ /* Result should be strictly positive */
+ if ((long)res.a0 <= 0)
+ return -ENODEV;
+
+ freq->rate = res.a0;
+ freq->smcarg = index;
+ freq->dram_core_parent_index = res.a1;
+ freq->dram_alt_parent_index = res.a2;
+ freq->dram_apb_parent_index = res.a3;
+
+ /* dram_core has 2 options: dram_pll or dram_alt_root */
+ if (freq->dram_core_parent_index != 1 &&
+ freq->dram_core_parent_index != 2)
+ return -ENODEV;
+ /* dram_apb and dram_alt have exactly 8 possible parents */
+ if (freq->dram_alt_parent_index > 8 ||
+ freq->dram_apb_parent_index > 8)
+ return -ENODEV;
+ /* dram_core from alt requires explicit dram_alt parent */
+ if (freq->dram_core_parent_index == 2 &&
+ freq->dram_alt_parent_index == 0)
+ return -ENODEV;
+ }
+
+ return 0;
+}
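The query protocol is one SMC per table entry. A plausible reply for a 3000 MT/s point sourced from dram_pll would be a0=3000, a1=1, a2=0, a3=1 (this tuple is illustrative, not taken from firmware documentation). The parent indices are 1-based, with 0 meaning the mux is untouched for that entry, which is why imx8m_ddrc_set_freq() subtracts one before calling clk_hw_get_parent_by_index() and why a dram_core sourced from dram_alt (index 2) must name an explicit dram_alt parent.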
+
+static int imx8m_ddrc_check_opps(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int i, opp_count;
+
+ /* Enumerate DT OPPs and disable those not supported by firmware */
+ opp_count = dev_pm_opp_get_opp_count(dev);
+ if (opp_count < 0)
+ return opp_count;
+ for (i = 0, freq = 0; i < opp_count; ++i, ++freq) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed enumerating OPPs: %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+ dev_pm_opp_put(opp);
+
+ freq_info = imx8m_ddrc_find_freq(priv, freq);
+ if (!freq_info) {
+ dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n",
+ freq, DIV_ROUND_CLOSEST(freq, 250000));
+ dev_pm_opp_disable(dev, freq);
+ }
+ }
+
+ return 0;
+}
+
+static void imx8m_ddrc_exit(struct device *dev)
+{
+ dev_pm_opp_of_remove_table(dev);
+}
+
+static int imx8m_ddrc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8m_ddrc *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = imx8m_ddrc_init_freq_info(dev);
+ if (ret) {
+ dev_err(dev, "failed to init firmware freq info: %d\n", ret);
+ return ret;
+ }
+
+ priv->dram_core = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->dram_core)) {
+ ret = PTR_ERR(priv->dram_core);
+ dev_err(dev, "failed to fetch core clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_pll = devm_clk_get(dev, "pll");
+ if (IS_ERR(priv->dram_pll)) {
+ ret = PTR_ERR(priv->dram_pll);
+ dev_err(dev, "failed to fetch pll clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_alt = devm_clk_get(dev, "alt");
+ if (IS_ERR(priv->dram_alt)) {
+ ret = PTR_ERR(priv->dram_alt);
+ dev_err(dev, "failed to fetch alt clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(priv->dram_apb)) {
+ ret = PTR_ERR(priv->dram_apb);
+ dev_err(dev, "failed to fetch apb clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ ret = imx8m_ddrc_check_opps(dev);
+ if (ret < 0)
+ goto err;
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx8m_ddrc_target;
+ priv->profile.get_dev_status = imx8m_ddrc_get_dev_status;
+ priv->profile.exit = imx8m_ddrc_exit;
+ priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->dram_core);
+
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx8m_ddrc_of_match[] = {
+ { .compatible = "fsl,imx8m-ddrc", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match);
+
+static struct platform_driver imx8m_ddrc_platdrv = {
+ .probe = imx8m_ddrc_probe,
+ .driver = {
+ .name = "imx8m-ddrc-devfreq",
+ .of_match_table = of_match_ptr(imx8m_ddrc_of_match),
+ },
+};
+module_platform_driver(imx8m_ddrc_platdrv);
+
+MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e65d7279d79..24f04f78285b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
if (res.a0) {
dev_err(dev, "Failed to set dram param: %ld\n",
res.a0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
}
}
@@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
- if (IS_ERR(data->regmap_pmu))
- return PTR_ERR(data->regmap_pmu);
+ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu)) {
+ ret = PTR_ERR(data->regmap_pmu);
+ goto err_edev;
+ }
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
};
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
@@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
of_property_read_u32(np, "upthreshold",
@@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
err_free_opp:
dev_pm_opp_of_remove_table(&pdev->dev);
+err_edev:
+ devfreq_event_disable_edev(data->edev);
+
return ret;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6fa1eba9d477..5142da401db3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -239,6 +239,14 @@ config FSL_RAID
the capability to offload memcpy, xor and pq computation
for raid5/6.
+config HISI_DMA
+ tristate "HiSilicon DMA Engine support"
+ depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the HiSilicon Kunpeng DMA engine.
+
config IMG_MDC_DMA
tristate "IMG MDC support"
depends on MIPS || COMPILE_TEST
@@ -273,6 +281,19 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD
+ tristate "Intel Data Accelerators support"
+ depends on PCI && X86_64
+ select DMA_ENGINE
+ select SBITMAP
+ help
+ Enable support for the Intel(R) data accelerators present
+ in Intel Xeon CPUs.
+
+ Say Y if you have such a platform.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
@@ -497,6 +518,15 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
+config PLX_DMA
+ tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+ depends on PCI
+ select DMA_ENGINE
+ help
+ Some PLX ExpressLane PCI Switches support additional DMA engines.
+ These are exposed via extra functions on the switch's
+ upstream port. Each function exposes one DMA channel.
+
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 42d7e2fc64fa..1d908394fbea 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
@@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 832aefbe7af9..539e785039ca 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e4c593f48575..4768ef26013b 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
/* stop DMA activity */
if (c->desc) {
- if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
- vchan_terminate_vdesc(&c->desc->vd);
- else
- vchan_vdesc_fini(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index a0ee404b736e..f1d149e32839 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
+ struct regmap *regmap;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmac);
- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+ &axi_dmac_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_irq;
+ }
return 0;
+err_free_irq:
+ free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 44af435628f8..448f663da89c 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+ { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 03ac4b96117c..f3ef4edd4de1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */
+#define DMA_SLAVE_NAME "slave"
+
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-#define dma_device_satisfies_mask(device, mask) \
- __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
- const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ * the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ * the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
- return chan->device->dev->driver->owner;
+ return chan->device->owner;
}
/**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
}
}
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
/**
* dma_chan_get - try to grab a dma channel's parent driver module
* @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
if (!try_module_get(owner))
return -ENODEV;
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
/* allocate upon first client reference */
if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
return 0;
err_out:
+ dma_device_put(chan->device);
+module_put_out:
module_put(owner);
return ret;
}
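The kref now pins the dma_device until the last channel user is gone, so a driver that wants safe unbinding provides device_release() and frees its state there instead of in remove(). A hypothetical sketch (all mydrv_* names are illustrative):

	/* Sketch only: mydrv and its fields are made-up names. */
	static void mydrv_dma_release(struct dma_device *dma_dev)
	{
		struct mydrv *p = container_of(dma_dev, struct mydrv, dma_dev);

		kfree(p);	/* last reference dropped; now safe to free */
	}

	...
	p->dma_dev.device_release = mydrv_dma_release;
	ret = dma_async_device_register(&p->dma_dev);

Drivers that skip this get the dev_warn() added below and remain unsafe to unbind while channels are in use.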
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
return;
chan->client_count--;
- module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
chan->router = NULL;
chan->route_data = NULL;
}
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
EXPORT_SYMBOL(dma_sync_wait);
/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
- struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
- enum dma_transaction_type cap;
- int err = 0;
-
- bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
- /* 'interrupt', 'private', and 'slave' are channel capabilities,
- * but are not associated with an operation so they do not need
- * an entry in the channel_table
- */
- clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
- clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
- clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
- for_each_dma_cap_mask(cap, dma_cap_mask_all) {
- channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
- if (!channel_table[cap]) {
- err = -ENOMEM;
- break;
- }
- }
-
- if (err) {
- pr_err("initialization failure\n");
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- free_percpu(channel_table[cap]);
- }
-
- return err;
-}
-arch_initcall(dma_channel_table_init);
-
-/**
* dma_find_channel - find a channel to carry out the operation
* @tx_type: transaction type
*/
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
}
EXPORT_SYMBOL(dma_issue_pending_all);
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
- int node = dev_to_node(chan->device->dev);
- return node == NUMA_NO_NODE ||
- cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
- struct dma_device *device;
- struct dma_chan *chan;
- struct dma_chan *min = NULL;
- struct dma_chan *localmin = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (!dma_has_cap(cap, device->cap_mask) ||
- dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!chan->client_count)
- continue;
- if (!min || chan->table_count < min->table_count)
- min = chan;
-
- if (dma_chan_is_local(chan, cpu))
- if (!localmin ||
- chan->table_count < localmin->table_count)
- localmin = chan;
- }
- }
-
- chan = localmin ? localmin : min;
-
- if (chan)
- chan->table_count++;
-
- return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
- struct dma_chan *chan;
- struct dma_device *device;
- int cpu;
- int cap;
-
- /* undo the last distribution */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_possible_cpu(cpu)
- per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node)
- chan->table_count = 0;
- }
-
- /* don't populate the channel_table if no clients are available */
- if (!dmaengine_ref_count)
- return;
-
- /* redistribute available channels */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_online_cpu(cpu) {
- chan = min_chan(cap, cpu);
- per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
- }
-}
-
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
if (has_acpi_companion(dev) && !chan)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan) {
- /* Valid channel found or requester needs to be deferred */
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- return chan;
- }
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
/* Try to find the channel via the DMA filter map(s) */
mutex_lock(&dma_list_mutex);
@@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
}
mutex_unlock(&dma_list_mutex);
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
+
+ return ERR_PTR(-EPROBE_DEFER);
+
+found:
+ chan->slave = dev;
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return ERR_PTR(-ENOMEM);
+
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+ return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
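Besides the flattened control flow, the found: path now records the consumer relationship in sysfs: the channel directory gains a "slave" link to the requesting device and the device gains a "dma:<name>" link back. Consumers keep using the same call pattern; a sketch, assuming a channel named "rx" in the device's DT/ACPI description:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... set up descriptors and run transfers ... */

	dma_release_channel(chan);	/* also tears down the sysfs links */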
@@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+ if (chan->slave) {
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
*/
void dmaengine_put(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
dmaengine_ref_count--;
BUG_ON(dmaengine_ref_count < 0);
/* drop channel references */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
continue;
list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device)
return 0;
}
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ int chan_id)
+{
+ int rc = 0;
+ int chancnt = device->chancnt;
+ atomic_t *idr_ref;
+ struct dma_chan *tchan;
+
+ tchan = list_first_entry_or_null(&device->channels,
+ struct dma_chan, device_node);
+ if (tchan->dev) {
+ idr_ref = tchan->dev->idr_ref;
+ } else {
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ }
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local)
+ goto err_out;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ free_percpu(chan->local);
+ chan->local = NULL;
+ goto err_out;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+ * the channel. Otherwise we are static enumerating.
+ */
+ chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out;
+ chan->client_count = 0;
+ device->chancnt = chan->chan_id + 1;
+
+ return 0;
+
+ err_out:
+ free_percpu(chan->local);
+ kfree(chan->dev);
+ if (atomic_dec_return(idr_ref) == 0)
+ kfree(idr_ref);
+ return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan, -1);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ list_del(&chan->device_node);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
+ *
+ * After calling this routine the structure may only be freed from the
+ * device_release() callback, which is invoked once
+ * dma_async_device_unregister() has been called and no further references
+ * are held.
*/
int dma_async_device_register(struct dma_device *device)
{
- int chancnt = 0, rc;
+ int rc, i = 0;
struct dma_chan* chan;
- atomic_t *idr_ref;
if (!device)
return -ENODEV;
@@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ device->owner = device->dev->driver->owner;
+
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
@@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (!device->device_release)
+ dev_warn(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
rc = get_dma_id(device);
- if (rc != 0) {
- kfree(idr_ref);
+ if (rc != 0)
return rc;
- }
-
- atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = -ENOMEM;
- chan->local = alloc_percpu(typeof(*chan->local));
- if (chan->local == NULL)
- goto err_out;
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
- if (chan->dev == NULL) {
- free_percpu(chan->local);
- chan->local = NULL;
+ rc = __dma_async_device_channel_register(device, chan, i++);
+ if (rc < 0)
goto err_out;
- }
-
- chan->chan_id = chancnt++;
- chan->dev->device.class = &dma_devclass;
- chan->dev->device.parent = device->dev;
- chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
- chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
-
- rc = device_register(&chan->dev->device);
- if (rc) {
- free_percpu(chan->local);
- chan->local = NULL;
- kfree(chan->dev);
- atomic_dec(idr_ref);
- goto err_out;
- }
- chan->client_count = 0;
- }
-
- if (!chancnt) {
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
- rc = -ENODEV;
- goto err_out;
}
- device->chancnt = chancnt;
-
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device)
err_out:
/* if we never registered a channel just release the idr */
- if (atomic_read(idr_ref) == 0) {
+ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id);
- kfree(idr_ref);
return rc;
}
@@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register);
*/
void dma_async_device_unregister(struct dma_device *device)
{
- struct dma_chan *chan;
+ struct dma_chan *chan, *n;
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
mutex_lock(&dma_list_mutex);
- list_del_rcu(&device->global_node);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ dma_device_put(device);
mutex_unlock(&dma_list_mutex);
-
- list_for_each_entry(chan, &device->channels, device_node) {
- WARN_ONCE(chan->client_count,
- "%s called while %d clients hold a reference\n",
- __func__, chan->client_count);
- mutex_lock(&dma_list_mutex);
- chan->dev->chan = NULL;
- mutex_unlock(&dma_list_mutex);
- device_unregister(&chan->dev->device);
- free_percpu(chan->local);
- }
}
EXPORT_SYMBOL(dma_async_device_unregister);
@@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
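+/*
+ * Two mutually exclusive per-descriptor metadata modes are supported:
+ * DESC_METADATA_CLIENT - the client allocates the metadata buffer and
+ * attaches it with dmaengine_desc_attach_metadata();
+ * DESC_METADATA_ENGINE - the engine owns the buffer; clients map it with
+ * dmaengine_desc_get_metadata_ptr() and report the bytes written with
+ * dmaengine_desc_set_metadata_len().
+ * The first call on a descriptor locks in its mode.
+ */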
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
*/
@@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void)
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
-
-
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 501c0b063f85..e8a320c9e57c 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
state->last = complete;
state->used = used;
state->residue = 0;
+ state->in_flight_bytes = 0;
}
return dma_async_is_complete(cookie, complete, used);
}
@@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
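+/*
+ * in_flight_bytes: data the DMA has accepted for transfer but not yet made
+ * visible at the destination; drivers that can read this back from their
+ * hardware report it alongside the residue.
+ */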
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
@@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
return (cb->callback) ? true : false;
}
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
#endif
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a1ce307c502f..14c1ac26f866 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
vchan_get_all_descriptors(&chan->vc, &head);
- /*
- * As vchan_dma_desc_free_list can access to desc_allocated list
- * we need to call it in vc.lock context.
- */
- vchan_dma_desc_free_list(&chan->vc, &head);
-
spin_unlock_irqrestore(&chan->vc.lock, flags);
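+ /* the descriptors now live only on the local list; free them unlocked */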
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
return 0;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b1a7ca91701a..5697c3622699 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
+ int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
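+ /*
+ * On SoCs where the DMAMUX block is big-endian (mux_swap), the byte-wide
+ * mux registers sit in reversed byte lanes within each 32-bit word, so
+ * remap offsets 0<->3 and 1<->2 before addressing the mux.
+ */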
+ if (fsl_chan->edma->drvdata->mux_swap)
+ ch_off += endian_diff[ch_off % 4];
+
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 5eaa2902ed39..67e422590c9a 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -147,6 +147,7 @@ struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
bool has_dmaclk;
+ bool mux_swap;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index b626c06ac2e0..eff7ebd8cf35 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata ls1028a_data = {
+ .version = v1,
+ .dmamuxs = DMAMUX_NR,
+ .mux_swap = true,
+ .setup_irq = fsl_edma_irq_init,
+};
+
static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
@@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 89792083d62c..95cc0256b387 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
return;
list_for_each_entry_safe(comp_temp, _comp_temp,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
new file mode 100644
index 000000000000..ed3619266a48
--- /dev/null
+++ b/drivers/dma/hisi_dma.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L 0x0
+#define HISI_DMA_SQ_BASE_H 0x4
+#define HISI_DMA_SQ_DEPTH 0x8
+#define HISI_DMA_SQ_TAIL_PTR 0xc
+#define HISI_DMA_CQ_BASE_L 0x10
+#define HISI_DMA_CQ_BASE_H 0x14
+#define HISI_DMA_CQ_DEPTH 0x18
+#define HISI_DMA_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_CTRL0 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S 0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
+#define HISI_DMA_CTRL1 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_INT_STS 0x40
+#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
+#define HISI_DMA_INT_MSK 0x44
+#define HISI_DMA_MODE 0x217c
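+/* stride between consecutive channels' queue register blocks */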
+#define HISI_DMA_OFFSET 0x100
+
+#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_CHAN_NUM 30
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+enum hisi_dma_mode {
+ EP = 0,
+ RC,
+};
+
+enum hisi_dma_chan_status {
+ DISABLE = -1,
+ IDLE = 0,
+ RUN,
+ CPL,
+ PAUSE,
+ HALT,
+ ABORT,
+ WAIT,
+ BUFFCLR,
+};
+
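+/*
+ * Each channel owns a submission queue (SQ) of hisi_dma_sqe and a completion
+ * queue (CQ) of hisi_dma_cqe; software advances sq_tail when posting work and
+ * cq_head when reaping completions, mirroring both to the hardware pointers.
+ */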
+struct hisi_dma_sqe {
+ __le32 dw0;
+#define OPCODE_MASK GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE 0x1
+#define OPCODE_M2M 0x4
+#define LOCAL_IRQ_EN BIT(8)
+#define ATTR_SRC_MASK GENMASK(14, 12)
+ __le32 dw1;
+ __le32 dw2;
+#define ATTR_DST_MASK GENMASK(26, 24)
+ __le32 length;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+ __le32 rsv0;
+ __le32 rsv1;
+ __le16 sq_head;
+ __le16 rsv2;
+ __le16 rsv3;
+ __le16 w0;
+#define STATUS_MASK GENMASK(15, 1)
+#define STATUS_SUCC 0x0
+#define VALID_BIT BIT(0)
+};
+
+struct hisi_dma_desc {
+ struct virt_dma_desc vd;
+ struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+ struct virt_dma_chan vc;
+ struct hisi_dma_dev *hdma_dev;
+ struct hisi_dma_sqe *sq;
+ struct hisi_dma_cqe *cq;
+ dma_addr_t sq_dma;
+ dma_addr_t cq_dma;
+ u32 sq_tail;
+ u32 cq_head;
+ u32 qp_num;
+ enum hisi_dma_chan_status status;
+ struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ struct dma_device dma_dev;
+ u32 chan_num;
+ u32 chan_depth;
+ struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct hisi_dma_desc, vd);
+}
+
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+ u32 val)
+{
+ writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr);
+ tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool pause)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool enable)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+ HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ void __iomem *base = hdma_dev->base;
+
+ hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+ HISI_DMA_INT_STS_MASK);
+ hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
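+/*
+ * Quiesce and reinitialize one hardware channel: pause it, wait for the FSM
+ * to leave RUN, reset the queue, rewind both ring pointers, then re-enable
+ * the channel and wait for the FSM to report IDLE.
+ */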
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ u32 index = chan->qp_num, tmp;
+ int ret;
+
+ hisi_dma_pause_dma(hdma_dev, index, true);
+ hisi_dma_enable_dma(hdma_dev, index, false);
+ hisi_dma_mask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+ WARN_ON(1);
+ }
+
+ hisi_dma_do_reset(hdma_dev, index);
+ hisi_dma_reset_qp_point(hdma_dev, index);
+ hisi_dma_pause_dma(hdma_dev, index, false);
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+ WARN_ON(1);
+ }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+ hisi_dma_reset_hw_chan(chan);
+ vchan_free_chan_resources(&chan->vc);
+
+ memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+ memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+ chan->sq_tail = 0;
+ chan->cq_head = 0;
+ chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->sqe.length = cpu_to_le32(len);
+ desc->sqe.src_addr = cpu_to_le64(src);
+ desc->sqe.dst_addr = cpu_to_le64(dst);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+ chan->desc = NULL;
+ return;
+ }
+ list_del(&vd->node);
+ desc = to_hisi_dma_desc(vd);
+ chan->desc = desc;
+
+ memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+ /* update the remaining fields in the sqe */
+ sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+ sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+ /* make sure data has been updated in sqe */
+ wmb();
+
+ /* update sq tail, point to new sqe position */
+ chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+ /* update sq_tail to trigger a new task */
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+ chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (vchan_issue_pending(&chan->vc))
+ hisi_dma_start_transfer(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vd);
+ chan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+ return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+ size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+ size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct hisi_dma_chan *chan;
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ chan = &hdma_dev->chan[i];
+ chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+ GFP_KERNEL);
+ if (!chan->sq)
+ return -ENOMEM;
+
+ chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+ GFP_KERNEL);
+ if (!chan->cq)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ u32 hw_depth = hdma_dev->chan_depth - 1;
+ void __iomem *base = hdma_dev->base;
+
+ /* set sq, cq base */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ lower_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ upper_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ lower_32_bits(chan->cq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ upper_32_bits(chan->cq_dma));
+
+ /* set sq, cq depth */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+ /* init sq tail and cq head */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_init_hw_qp(hdma_dev, qp_index);
+ hisi_dma_unmask_irq(hdma_dev, qp_index);
+ hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hdma_dev->chan[i].qp_num = i;
+ hdma_dev->chan[i].hdma_dev = hdma_dev;
+ hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+ vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+ hisi_dma_enable_qp(hdma_dev, i);
+ }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hisi_dma_disable_qp(hdma_dev, i);
+ tasklet_kill(&hdma_dev->chan[i].vc.task);
+ }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+ struct hisi_dma_chan *chan = data;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct hisi_dma_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = chan->desc;
+ cqe = chan->cq + chan->cq_head;
+ if (desc) {
+ if (FIELD_GET(STATUS_MASK, le16_to_cpu(cqe->w0)) == STATUS_SUCC) {
+ chan->cq_head = (chan->cq_head + 1) %
+ hdma_dev->chan_depth;
+ hisi_dma_chan_write(hdma_dev->base,
+ HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+ chan->cq_head);
+ vchan_cookie_complete(&desc->vd);
+ } else {
+ dev_err(&hdma_dev->pdev->dev, "task error!\n");
+ }
+
+ chan->desc = NULL;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+ struct pci_dev *pdev = hdma_dev->pdev;
+ int i, ret;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+ &hdma_dev->chan[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+ int ret;
+
+ ret = hisi_dma_alloc_qps_mem(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+ return ret;
+ }
+
+ ret = hisi_dma_request_qps_irq(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+ return ret;
+ }
+
+ hisi_dma_enable_qps(hdma_dev);
+
+ return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+ hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+ enum hisi_dma_mode mode)
+{
+ writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_dma_dev *hdma_dev;
+ struct dma_device *dma_dev;
+ size_t dev_size;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+ if (ret) {
+ dev_err(dev, "failed to remap I/O region!\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ dev_size = struct_size(hdma_dev, chan, HISI_DMA_CHAN_NUM);
+ hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+ if (!hdma_dev)
+ return -ENOMEM;
+
+ hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+ hdma_dev->pdev = pdev;
+ hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+ hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+ pci_set_drvdata(pdev, hdma_dev);
+ pci_set_master(pdev);
+
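+ /* exactly one MSI vector per hardware channel */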
+ ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+ PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate MSI vectors!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+ if (ret)
+ return ret;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ hisi_dma_set_mode(hdma_dev, RC);
+
+ ret = hisi_dma_enable_hw_channels(hdma_dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable hw channel!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+ hdma_dev);
+ if (ret)
+ return ret;
+
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret < 0)
+ dev_err(dev, "failed to register device!\n");
+
+ return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+ { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+ .name = "hisi_dma",
+ .id_table = hisi_dma_pci_tbl,
+ .probe = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
new file mode 100644
index 000000000000..8978b898d777
--- /dev/null
+++ b/drivers/dma/idxd/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
new file mode 100644
index 000000000000..1d7347825b95
--- /dev/null
+++ b/drivers/dma/idxd/cdev.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+ const char *name;
+ dev_t devt;
+ struct ida minor_ida;
+};
+
+/*
+ * ictx is an array indexed by accelerator type; enum idxd_type
+ * supplies the index.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+ { .name = "dsa" },
+};
+
+struct idxd_user_context {
+ struct idxd_wq *wq;
+ struct task_struct *task;
+ unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+ CDEV_NORMAL = 0,
+ CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+ dev_dbg(dev, "releasing cdev device\n");
+ kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+ .name = "idxd_cdev",
+ .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+ struct cdev *cdev = inode->i_cdev;
+
+ return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+ return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+ return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct idxd_user_context *ctx;
+ struct idxd_device *idxd;
+ struct idxd_wq *wq;
+ struct device *dev;
+ struct idxd_cdev *idxd_cdev;
+
+ wq = inode_wq(inode);
+ idxd = wq->idxd;
+ dev = &idxd->pdev->dev;
+ idxd_cdev = &wq->idxd_cdev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->wq = wq;
+ filp->private_data = ctx;
+ idxd_wq_get(wq);
+ return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+ struct idxd_user_context *ctx = filep->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+ filep->private_data = NULL;
+
+ kfree(ctx);
+ idxd_wq_put(wq);
+ return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info_ratelimited(dev,
+ "%s: %s: mapping too large: %lu\n",
+ current->comm, func,
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+ unsigned long pfn;
+ int rc;
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
+
+ vma->vm_flags |= VM_DONTCOPY;
+ pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+ IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ctx;
+
+ return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ unsigned long flags;
+ __poll_t out = 0;
+
+ poll_wait(filp, &idxd_cdev->err_queue, wait);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (idxd->sw_err.valid)
+ out = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = idxd_cdev_open,
+ .release = idxd_cdev_release,
+ .mmap = idxd_cdev_mmap,
+ .poll = idxd_cdev_poll,
+};
+
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+ return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+ struct device *dev;
+ int minor, rc;
+
+ idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+ if (!idxd_cdev->dev)
+ return -ENOMEM;
+
+ dev = idxd_cdev->dev;
+ dev->parent = &idxd->pdev->dev;
+ dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+ idxd->id, wq->id);
+ dev->bus = idxd_get_bus_type(idxd);
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto ida_err;
+ }
+
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ dev->type = &idxd_cdev_device_type;
+ rc = device_register(dev);
+ if (rc < 0) {
+ dev_err(&idxd->pdev->dev, "device register failed\n");
+ put_device(dev);
+ goto dev_reg_err;
+ }
+ idxd_cdev->minor = minor;
+
+ return 0;
+
+ dev_reg_err:
+ ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ ida_err:
+ kfree(dev);
+ idxd_cdev->dev = NULL;
+ return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+ enum idxd_cdev_cleanup cdev_state)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ if (cdev_state == CDEV_NORMAL)
+ cdev_del(&idxd_cdev->cdev);
+ device_unregister(idxd_cdev->dev);
+ /*
+ * device_type->release() will be called on the device and will free
+ * the allocated struct device, so we can simply forget it here.
+ */
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ idxd_cdev->dev = NULL;
+ idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct cdev *cdev = &idxd_cdev->cdev;
+ struct device *dev;
+ int rc;
+
+ rc = idxd_wq_cdev_dev_setup(wq);
+ if (rc < 0)
+ return rc;
+
+ dev = idxd_cdev->dev;
+ cdev_init(cdev, &idxd_cdev_fops);
+ cdev_set_parent(cdev, &dev->kobj);
+ rc = cdev_add(cdev, dev->devt, 1);
+ if (rc) {
+ dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+ idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+ return rc;
+ }
+
+ init_waitqueue_head(&idxd_cdev->err_queue);
+ return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+ idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+ int rc, i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ ida_init(&ictx[i].minor_ida);
+ rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+ ictx[i].name);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+ ida_destroy(&ictx[i].minor_ida);
+ }
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
new file mode 100644
index 000000000000..ada69e722f84
--- /dev/null
+++ b/drivers/dma/idxd/device.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 1;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i, rc;
+
+ for (i = 0; i < msixcnt; i++) {
+ rc = idxd_mask_msix_vector(idxd, i);
+ if (rc < 0)
+ dev_warn(&pdev->dev,
+ "Failed disabling msix vec %d\n", i);
+ }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 0;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 1;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 0;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->hw_descs[i]);
+
+ kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs[i]) {
+ free_hw_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->descs[i]);
+
+ kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+ GFP_KERNEL, node);
+ if (!wq->descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->descs[i]) {
+ free_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_group *group = wq->group;
+ struct device *dev = &idxd->pdev->dev;
+ int rc, num_descs, i;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
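+ /*
+ * size the pool for the WQ depth plus the descriptors the engines in
+ * this group can hold internally
+ */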
+ num_descs = wq->size +
+ idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+ wq->num_descs = num_descs;
+
+ rc = alloc_hw_descs(wq, num_descs);
+ if (rc < 0)
+ return rc;
+
+ wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+ wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr, GFP_KERNEL);
+ if (!wq->compls) {
+ rc = -ENOMEM;
+ goto fail_alloc_compls;
+ }
+
+ rc = alloc_descs(wq, num_descs);
+ if (rc < 0)
+ goto fail_alloc_descs;
+
+ rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+ dev_to_node(dev));
+ if (rc < 0)
+ goto fail_sbitmap_init;
+
+ for (i = 0; i < num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ desc->hw = wq->hw_descs[i];
+ desc->completion = &wq->compls[i];
+ desc->compl_dma = wq->compls_addr +
+ sizeof(struct dsa_completion_record) * i;
+ desc->id = i;
+ desc->wq = wq;
+
+ dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
+ return 0;
+
+ fail_sbitmap_init:
+ free_descs(wq);
+ fail_alloc_descs:
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+ free_hw_descs(wq);
+ return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
+ free_hw_descs(wq);
+ free_descs(wq);
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+ dev_dbg(dev, "WQ enable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_ENABLED;
+ dev_dbg(dev, "WQ %d enabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status, operand;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return 0;
+ }
+
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_DISABLED;
+ dev_dbg(dev, "WQ %d disabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t start;
+
+ start = pci_resource_start(pdev, IDXD_WQ_BAR);
+ start = start + wq->id * IDXD_PORTAL_SIZE;
+
+ wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->dportal)
+ return -ENOMEM;
+ dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+ return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
+ return true;
+ return false;
+}
+
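+/*
+ * Device commands are serialized under dev_lock: wait for CMDSTS to go idle,
+ * write the command register, then poll CMDSTS again for the result.
+ */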
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+ u32 sts, to = timeout;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+ cpu_relax();
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ }
+
+ if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+ dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+ *status = 0;
+ return -EBUSY;
+ }
+
+ *status = sts;
+ return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+ union idxd_command_reg cmd;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = cmd_code;
+ cmd.operand = operand;
+ dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+ __func__, cmd_code, operand);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device already enabled\n");
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* succeed if the command completed or the device was already enabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ return -ENXIO;
+ }
+
+ idxd->state = IDXD_DEV_ENABLED;
+ return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (!idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device is not enabled\n");
+ return 0;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* succeed if the command completed or the device was already disabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ rc = -ENXIO;
+ return rc;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+ return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+ u32 status;
+ int rc;
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = __idxd_device_reset(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+ u32 grpcfg_offset;
+
+ dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
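+ /*
+ * Each group's config block spans 64 bytes: WQ enable bits at offset 0,
+ * engine bits at offset 32, flags at offset 40.
+ */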
+ /* setup GRPWQCFG */
+ for (i = 0; i < 4; i++) {
+ grpcfg_offset = idxd->grpcfg_offset +
+ group->id * 64 + i * sizeof(u64);
+ iowrite64(group->grpcfg.wqs[i],
+ idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset,
+ ioread64(idxd->reg_base + grpcfg_offset));
+ }
+
+ /* setup GRPENGCFG */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+ /* setup GRPFLAGS */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset,
+ ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+
+{
+ union gencfg_reg reg;
+ int i;
+ struct device *dev = &idxd->pdev->dev;
+
+ /* Setup bandwidth token limit */
+ if (idxd->token_limit) {
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.token_limit = idxd->token_limit;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+ }
+
+ dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+ ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ idxd_group_config_write(group);
+ }
+
+ return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 wq_offset;
+ int i;
+
+ if (!wq->group)
+ return 0;
+
+ memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+ /* bytes 0-3 */
+ wq->wqcfg.wq_size = wq->size;
+
+ if (wq->size == 0) {
+ dev_warn(dev, "Incorrect work queue size: 0\n");
+ return -EINVAL;
+ }
+
+ /* bytes 4-7 */
+ wq->wqcfg.wq_thresh = wq->threshold;
+
+ /* bytes 8-11 */
+ wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+ wq->wqcfg.mode = 1;
+
+ wq->wqcfg.priority = wq->priority;
+
+ /* bytes 12-15 */
+ wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+ wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+ dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+
+ return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ rc = idxd_wq_config_write(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+ int i;
+
+ /* TC-A 0 and TC-B 1 should be defaults */
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ if (group->tc_a == -1)
+ group->grpcfg.flags.tc_a = 0;
+ else
+ group->grpcfg.flags.tc_a = group->tc_a;
+ if (group->tc_b == -1)
+ group->grpcfg.flags.tc_b = 1;
+ else
+ group->grpcfg.flags.tc_b = group->tc_b;
+ group->grpcfg.flags.use_token_limit = group->use_token_limit;
+ group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+ if (group->tokens_allowed)
+ group->grpcfg.flags.tokens_allowed =
+ group->tokens_allowed;
+ else
+ group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+ int i, engines = 0;
+ struct idxd_engine *eng;
+ struct idxd_group *group;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ group->grpcfg.engines = 0;
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ eng = &idxd->engines[i];
+ group = eng->group;
+
+ if (!group)
+ continue;
+
+ group->grpcfg.engines |= BIT(eng->id);
+ engines++;
+ }
+
+ if (!engines)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct idxd_group *group;
+ int i, j, configured = 0;
+ struct device *dev = &idxd->pdev->dev;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ for (j = 0; j < 4; j++)
+ group->grpcfg.wqs[j] = 0;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = &idxd->wqs[i];
+ group = wq->group;
+
+ if (!wq->group)
+ continue;
+ if (!wq->size)
+ continue;
+
+ if (!wq_dedicated(wq)) {
+ dev_warn(dev, "No shared workqueue support.\n");
+ return -EINVAL;
+ }
+
+ group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+ configured++;
+ }
+
+ if (configured == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_wqs_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_engines_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ idxd_group_flags_setup(idxd);
+
+ rc = idxd_wqs_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_groups_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644
index 000000000000..c64c1429d160
--- /dev/null
+++ b/drivers/dma/idxd/dma.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+ return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct dmaengine_result res;
+ int complete = 1;
+
+ if (desc->completion->status == DSA_COMP_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (desc->completion->status)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else if (comp_type == IDXD_COMPLETE_ABORT)
+ res.result = DMA_TRANS_ABORTED;
+ else
+ complete = 0;
+
+ tx = &desc->txd;
+ if (complete && tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+ *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+ if (flags & DMA_PREP_INTERRUPT)
+ *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+ u64 *compl_addr)
+{
+ *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+ struct dsa_hw_desc *hw, char opcode,
+ u64 addr_f1, u64 addr_f2, u64 len,
+ u64 compl, u32 flags)
+{
+ struct idxd_device *idxd = wq->idxd;
+
+ hw->flags = flags;
+ hw->opcode = opcode;
+ hw->src_addr = addr_f1;
+ hw->dst_addr = addr_f2;
+ hw->xfer_size = len;
+ hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ hw->completion_addr = compl;
+
+ /*
+ * Descriptor completion vectors are 1-8 for MSIX. We will round
+ * robin through the 8 vectors.
+ */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ if (len > idxd->max_xfer_bytes)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+ dma_src, dma_dest, len, desc->compl_dma,
+ desc_flags);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_get(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+ return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_put(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct idxd_wq *wq = to_idxd_wq(c);
+ dma_cookie_t cookie;
+ int rc;
+ struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+ cookie = dma_cookie_assign(tx);
+
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
+ return rc;
+ }
+
+ return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+ struct dma_device *dma = &idxd->dma_dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->dev = &idxd->pdev->dev;
+
+ dma->device_release = idxd_dma_release;
+
+ if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+ }
+
+ dma->device_tx_status = idxd_dma_tx_status;
+ dma->device_issue_pending = idxd_dma_issue_pending;
+ dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+ return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+ dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct dma_device *dma = &idxd->dma_dev;
+ struct dma_chan *chan = &wq->dma_chan;
+ int rc;
+
+ memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan->device = dma;
+ list_add_tail(&chan->device_node, &dma->channels);
+ rc = dma_async_device_channel_register(dma, chan);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
new file mode 100644
index 000000000000..b8f8a363b4a7
--- /dev/null
+++ b/drivers/dma/idxd/idxd.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT 50
+#define IDXD_DRAIN_TIMEOUT 5000
+
+enum idxd_type {
+ IDXD_TYPE_UNKNOWN = -1,
+ IDXD_TYPE_DSA = 0,
+ IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE 128
+
+struct idxd_device_driver {
+ struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+ struct idxd_device *idxd;
+ int id;
+ struct llist_head pending_llist;
+ struct list_head work_list;
+};
+
+struct idxd_group {
+ struct device conf_dev;
+ struct idxd_device *idxd;
+ struct grpcfg grpcfg;
+ int id;
+ int num_engines;
+ int num_wqs;
+ bool use_token_limit;
+ u8 tokens_allowed;
+ u8 tokens_reserved;
+ int tc_a;
+ int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY 0xf
+
+enum idxd_wq_state {
+ IDXD_WQ_DISABLED = 0,
+ IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+ WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+ IDXD_WQT_NONE = 0,
+ IDXD_WQT_KERNEL,
+ IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+ struct cdev cdev;
+ struct device *dev;
+ int minor;
+ struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE 128U
+#define WQ_NAME_SIZE 1024
+#define WQ_TYPE_SIZE 10
+
+enum idxd_op_type {
+ IDXD_OP_BLOCK = 0,
+ IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+ void __iomem *dportal;
+ struct device conf_dev;
+ struct idxd_cdev idxd_cdev;
+ struct idxd_device *idxd;
+ int id;
+ enum idxd_wq_type type;
+ struct idxd_group *group;
+ int client_count;
+ struct mutex wq_lock; /* mutex for workqueue */
+ u32 size;
+ u32 threshold;
+ u32 priority;
+ enum idxd_wq_state state;
+ unsigned long flags;
+ union wqcfg wqcfg;
+ atomic_t dq_count; /* dedicated queue flow control */
+ u32 vec_ptr; /* interrupt steering */
+ struct dsa_hw_desc **hw_descs;
+ int num_descs;
+ struct dsa_completion_record *compls;
+ dma_addr_t compls_addr;
+ int compls_size;
+ struct idxd_desc **descs;
+ struct sbitmap sbmap;
+ struct dma_chan dma_chan;
+ struct percpu_rw_semaphore submit_lock;
+ wait_queue_head_t submit_waitq;
+ char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+ struct device conf_dev;
+ int id;
+ struct idxd_group *group;
+ struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+ u32 version;
+ union gen_cap_reg gen_cap;
+ union wq_cap_reg wq_cap;
+ union group_cap_reg group_cap;
+ union engine_cap_reg engine_cap;
+ struct opcap opcap;
+};
+
+enum idxd_device_state {
+ IDXD_DEV_HALTED = -1,
+ IDXD_DEV_DISABLED = 0,
+ IDXD_DEV_CONF_READY,
+ IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+ IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+ enum idxd_type type;
+ struct device conf_dev;
+ struct list_head list;
+ struct idxd_hw hw;
+ enum idxd_device_state state;
+ unsigned long flags;
+ int id;
+ int major;
+
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+
+ spinlock_t dev_lock; /* spinlock for device */
+ struct idxd_group *groups;
+ struct idxd_wq *wqs;
+ struct idxd_engine *engines;
+
+ int num_groups;
+
+ u32 msix_perm_offset;
+ u32 wqcfg_offset;
+ u32 grpcfg_offset;
+ u32 perfmon_offset;
+
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
+ int max_groups;
+ int max_engines;
+ int max_tokens;
+ int max_wqs;
+ int max_wq_size;
+ int token_limit;
+ int nr_tokens; /* non-reserved tokens */
+
+ union sw_err_reg sw_err;
+
+ struct msix_entry *msix_entries;
+ int num_wq_irqs;
+ struct idxd_irq_entry *irq_entries;
+
+ struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+ struct dsa_hw_desc *hw;
+ dma_addr_t desc_dma;
+ struct dsa_completion_record *completion;
+ dma_addr_t compl_dma;
+ struct dma_async_tx_descriptor txd;
+ struct llist_node llnode;
+ struct list_head list;
+ int id;
+ struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+ return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+ IDXD_PORTAL_UNLIMITED = 0,
+ IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+ return prot * 0x1000;
+}
+
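+/*
+ * Each wq owns IDXD_PORTAL_SIZE (0x4000) of submission MMIO, i.e. four
+ * 0x1000 portal pages (assuming 4K pages), hence the
+ * (wq_id * 4) << PAGE_SHIFT stride; prot then selects the limited or
+ * unlimited portal page within that window.
+ */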
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ enum idxd_portal_prot prot)
+{
+ return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+ idxd->type = IDXD_TYPE_DSA;
+ else
+ idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
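+/*
+ * The wq client refcount helpers below are plain counters with no
+ * locking of their own; callers are assumed to provide serialization
+ * (e.g. via wq->wq_lock).
+ */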
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+ wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+ wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+ return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
new file mode 100644
index 000000000000..7778c05deb5d
--- /dev/null
+++ b/drivers/dma/idxd/init.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static struct mutex idxd_idr_lock;
+
+static struct pci_device_id idxd_pci_tbl[] = {
+ /* DSA ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+ "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+ return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+ int rc = 0;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt < 0) {
+ dev_err(dev, "Not MSI-X interrupt capable.\n");
+ goto err_no_irq;
+ }
+
+ idxd->msix_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!idxd->msix_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++)
+ idxd->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+ if (rc) {
+ dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+ /*
+ * We implement 1 completion list per MSI-X entry, except for
+ * entry 0, which is reserved for errors and other miscellaneous events.
+ */
+ idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct idxd_irq_entry),
+ GFP_KERNEL);
+ if (!idxd->irq_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++) {
+ idxd->irq_entries[i].id = i;
+ idxd->irq_entries[i].idxd = idxd;
+ }
+
+ msix = &idxd->msix_entries[0];
+ irq_entry = &idxd->irq_entries[0];
+ rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+ idxd_misc_thread, 0, "idxd-misc",
+ irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate misc interrupt.\n");
+ goto err_no_irq;
+ }
+
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+ msix->vector);
+
+ /* first MSI-X entry is not for wq interrupts */
+ idxd->num_wq_irqs = msixcnt - 1;
+
+ for (i = 1; i < msixcnt; i++) {
+ msix = &idxd->msix_entries[i];
+ irq_entry = &idxd->irq_entries[i];
+
+ init_llist_head(&idxd->irq_entries[i].pending_llist);
+ INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+ rc = devm_request_threaded_irq(dev, msix->vector,
+ idxd_irq_handler,
+ idxd_wq_thread, 0,
+ "idxd-portal", irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate irq %d.\n",
+ msix->vector);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+ i, msix->vector);
+ }
+
+ idxd_unmask_error_interrupts(idxd);
+
+ return 0;
+
+ err_no_irq:
+ /* Disable error interrupt generation */
+ idxd_mask_error_interrupts(idxd);
+ pci_disable_msix(pdev);
+ dev_err(dev, "No usable interrupts\n");
+ return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ percpu_free_rwsem(&wq->submit_lock);
+ }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+ sizeof(struct idxd_group), GFP_KERNEL);
+ if (!idxd->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ idxd->groups[i].idxd = idxd;
+ idxd->groups[i].id = i;
+ idxd->groups[i].tc_a = -1;
+ idxd->groups[i].tc_b = -1;
+ }
+
+ idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+ GFP_KERNEL);
+ if (!idxd->wqs)
+ return -ENOMEM;
+
+ idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+ sizeof(struct idxd_engine), GFP_KERNEL);
+ if (!idxd->engines)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+ int rc;
+
+ wq->id = i;
+ wq->idxd = idxd;
+ mutex_init(&wq->wq_lock);
+ atomic_set(&wq->dq_count, 0);
+ init_waitqueue_head(&wq->submit_waitq);
+ wq->idxd_cdev.minor = -1;
+ rc = percpu_init_rwsem(&wq->submit_lock);
+ if (rc < 0) {
+ idxd_wqs_free_lock(idxd);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ idxd->engines[i].idxd = idxd;
+ idxd->engines[i].id = i;
+ }
+
+ return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+ union offsets_reg offsets;
+ struct device *dev = &idxd->pdev->dev;
+
+ offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
+ + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+ idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+ idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+ idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * 0x100;
+ dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ /* reading generic capabilities */
+ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+ dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+ idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+ dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+ idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+ dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+ if (idxd->hw.gen_cap.config_en)
+ set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+ /* reading group capabilities */
+ idxd->hw.group_cap.bits =
+ ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+ dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+ idxd->max_groups = idxd->hw.group_cap.num_groups;
+ dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+ idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+ dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+ idxd->nr_tokens = idxd->max_tokens;
+
+ /* read engine capabilities */
+ idxd->hw.engine_cap.bits =
+ ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+ dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+ idxd->max_engines = idxd->hw.engine_cap.num_engines;
+ dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+ /* read workqueue capabilities */
+ idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+ dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+ idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+ dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+ idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+ dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+ /* reading operation capabilities */
+ for (i = 0; i < 4; i++) {
+ idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+ IDXD_OPCAP_OFFSET + i * sizeof(u64));
+ dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+ }
+}
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+ void __iomem * const *iomap)
+{
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+
+ idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ if (!idxd)
+ return NULL;
+
+ idxd->pdev = pdev;
+ idxd->reg_base = iomap[IDXD_MMIO_BAR];
+ spin_lock_init(&idxd->dev_lock);
+
+ return idxd;
+}
+
+static int idxd_probe(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ dev_dbg(dev, "%s entered and resetting device\n", __func__);
+ rc = idxd_device_reset(idxd);
+ if (rc < 0)
+ return rc;
+ dev_dbg(dev, "IDXD reset complete\n");
+
+ idxd_read_caps(idxd);
+ idxd_read_table_offsets(idxd);
+
+ rc = idxd_setup_internals(idxd);
+ if (rc)
+ goto err_setup;
+
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ goto err_setup;
+
+ dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+ mutex_lock(&idxd_idr_lock);
+ idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+ mutex_unlock(&idxd_idr_lock);
+ if (idxd->id < 0) {
+ rc = -ENOMEM;
+ goto err_idr_fail;
+ }
+
+ idxd->major = idxd_cdev_get_major(idxd);
+
+ dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ return 0;
+
+ err_idr_fail:
+ idxd_mask_error_interrupts(idxd);
+ idxd_mask_msix_vectors(idxd);
+ err_setup:
+ return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+ int rc;
+ unsigned int mask;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Mapping BARs\n");
+ mask = (1 << IDXD_MMIO_BAR);
+ rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+ if (rc)
+ return rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, iomap);
+ if (!idxd)
+ return -ENOMEM;
+
+ idxd_set_type(idxd);
+
+ dev_dbg(dev, "Set PCI master\n");
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, idxd);
+
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ rc = idxd_setup_sysfs(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ return -ENODEV;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+
+ dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ idxd->hw.version);
+
+ return 0;
+}
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+
+ head = llist_del_all(&ie->pending_llist);
+ if (!head)
+ return;
+
+ llist_for_each_entry_safe(desc, itr, head, llnode) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *iter;
+
+ list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+ list_del(&desc->list);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
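+/*
+ * Quiesce the device: disable it, mask its interrupts, then complete
+ * any descriptors still parked on the per-vector lists with ABORT
+ * status so waiters are not left hanging.
+ */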
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ int rc, i;
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc)
+ dev_err(&pdev->dev, "Disabling device failed\n");
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_mask_msix_vectors(idxd);
+ idxd_mask_error_interrupts(idxd);
+
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ synchronize_irq(idxd->msix_entries[i].vector);
+ if (i == 0)
+ continue;
+ idxd_flush_pending_llist(irq_entry);
+ idxd_flush_work_list(irq_entry);
+ }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_cleanup_sysfs(idxd);
+ idxd_shutdown(pdev);
+ idxd_wqs_free_lock(idxd);
+ mutex_lock(&idxd_idr_lock);
+ idr_remove(&idxd_idrs[idxd->type], idxd->id);
+ mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = idxd_pci_tbl,
+ .probe = idxd_pci_probe,
+ .remove = idxd_remove,
+ .shutdown = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+ int err, i;
+
+ /*
+ * If the CPU does not support the MOVDIR64B (write512) instruction,
+ * there's no point in enumerating the device; we cannot use it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ pr_warn("idxd driver requires MOVDIR64B; not loading.\n");
+ return -ENODEV;
+ }
+
+ pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+ DRV_NAME, IDXD_DRIVER_VERSION);
+
+ mutex_init(&idxd_idr_lock);
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ idr_init(&idxd_idrs[i]);
+
+ err = idxd_register_bus_type();
+ if (err < 0)
+ return err;
+
+ err = idxd_register_driver();
+ if (err < 0)
+ goto err_idxd_driver_register;
+
+ err = idxd_cdev_register();
+ if (err)
+ goto err_cdev_register;
+
+ err = pci_register_driver(&idxd_pci_driver);
+ if (err)
+ goto err_pci_register;
+
+ return 0;
+
+err_pci_register:
+ idxd_cdev_remove();
+err_cdev_register:
+ idxd_unregister_driver();
+err_idxd_driver_register:
+ idxd_unregister_bus_type();
+ return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+ pci_unregister_driver(&idxd_pci_driver);
+ idxd_cdev_remove();
+ idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
new file mode 100644
index 000000000000..d6fcd2e60103
--- /dev/null
+++ b/drivers/dma/idxd/irq.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->state = IDXD_WQ_DISABLED;
+ }
+}
+
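+/*
+ * Attempt recovery from a software-reset halt: reset and reconfigure
+ * the device, re-enable it, then re-enable any wqs that were enabled
+ * before the halt. Caller must hold dev_lock.
+ */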
+static int idxd_restart(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ rc = __idxd_device_reset(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_config(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_warn(&idxd->pdev->dev,
+ "Unable to re-enable wq %s\n",
+ dev_name(&wq->conf_dev));
+ }
+ }
+ }
+
+ return 0;
+
+ out:
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ return rc;
+}
+
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+
+ idxd_mask_msix_vector(idxd, irq_entry->id);
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ union gensts_reg gensts;
+ u32 cause, val = 0;
+ int i, rc;
+ bool err = false;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ if (cause & IDXD_INTC_ERR) {
+ spin_lock_bh(&idxd->dev_lock);
+ for (i = 0; i < 4; i++)
+ idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ IDXD_SWERR_OFFSET + i * sizeof(u64));
+ iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ int id = idxd->sw_err.wq_idx;
+ struct idxd_wq *wq = &idxd->wqs[id];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ } else {
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ }
+ }
+
+ spin_unlock_bh(&idxd->dev_lock);
+ val |= IDXD_INTC_ERR;
+
+ for (i = 0; i < 4; i++)
+ dev_warn(dev, "err[%d]: %#16.16llx\n",
+ i, idxd->sw_err.bits[i]);
+ err = true;
+ }
+
+ if (cause & IDXD_INTC_CMD) {
+ /* Driver does not use command interrupts */
+ val |= IDXD_INTC_CMD;
+ }
+
+ if (cause & IDXD_INTC_OCCUPY) {
+ /* Driver does not utilize occupancy interrupt */
+ val |= IDXD_INTC_OCCUPY;
+ }
+
+ if (cause & IDXD_INTC_PERFMON_OVFL) {
+ /*
+ * Driver does not utilize perfmon counter overflow interrupt
+ * yet.
+ */
+ val |= IDXD_INTC_PERFMON_OVFL;
+ }
+
+ val ^= cause;
+ if (val)
+ dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+ val);
+
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (!err)
+ return IRQ_HANDLED;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ spin_lock_bh(&idxd->dev_lock);
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ rc = idxd_restart(idxd);
+ if (rc < 0)
+ dev_err(&idxd->pdev->dev,
+ "idxd restart failed, device halt.");
+ } else {
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need %s.\n",
+ gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+ "FLR" : "system reset");
+ }
+ spin_unlock_bh(&idxd->dev_lock);
+ }
+
+ idxd_unmask_msix_vector(idxd, irq_entry->id);
+ return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct idxd_desc *desc, *t;
+ struct llist_node *head;
+ int queued = 0;
+
+ head = llist_del_all(&irq_entry->pending_llist);
+ if (!head)
+ return 0;
+
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ list_add_tail(&desc->list, &irq_entry->work_list);
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct list_head *node, *next;
+ int queued = 0;
+
+ if (list_empty(&irq_entry->work_list))
+ return 0;
+
+ list_for_each_safe(node, next, &irq_entry->work_list) {
+ struct idxd_desc *desc =
+ container_of(node, struct idxd_desc, list);
+
+ if (desc->completion->status) {
+ list_del(&desc->list);
+ /* process and callback */
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int rc, processed = 0, retry = 0;
+
+ /*
+ * There are two lists we are processing. The pending_llist is where
+ * the submitter adds each descriptor after sending it to the device.
+ * It's a lockless singly linked list. The work_list is a regular
+ * Linux doubly linked list. We are in a scenario of multiple
+ * producers and a single consumer. The producers are all the kernel
+ * submitters of descriptors, and the consumer is the kernel irq
+ * handler thread for the MSI-X vector when using threaded irq. To
+ * work within the restrictions of llist and remain lockless, we
+ * take the following steps:
+ * 1. Iterate through the work_list and process any completed
+ * descriptor. Delete the completed entries during iteration.
+ * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list
+ * and process the completed entries.
+ * 4. If the entry is still waiting on hardware, list_add_tail() to
+ * the work_list.
+ * 5. Repeat until no more descriptors.
+ */
+ do {
+ rc = irq_process_work_list(irq_entry, &processed);
+ if (rc != 0) {
+ retry++;
+ continue;
+ }
+
+ rc = irq_process_pending_llist(irq_entry, &processed);
+ } while (rc != 0 && retry != 10);
+
+ idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+ if (processed == 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
new file mode 100644
index 000000000000..a39e7ae6b3d9
--- /dev/null
+++ b/drivers/dma/idxd/registers.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+
+#define IDXD_MMIO_BAR 0
+#define IDXD_WQ_BAR 2
+#define IDXD_PORTAL_SIZE 0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET 0x00
+#define IDXD_VER_MAJOR_MASK 0xf0
+#define IDXD_VER_MINOR_MASK 0x0f
+#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK)
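+/* Example: a version register value of 0x11 decodes as major 1, minor 1. */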
+
+union gen_cap_reg {
+ struct {
+ u64 block_on_fault:1;
+ u64 overlap_copy:1;
+ u64 cache_control_mem:1;
+ u64 cache_control_cache:1;
+ u64 rsvd:3;
+ u64 int_handle_req:1;
+ u64 dest_readback:1;
+ u64 drain_readback:1;
+ u64 rsvd2:6;
+ u64 max_xfer_shift:5;
+ u64 max_batch_shift:4;
+ u64 max_ims_mult:6;
+ u64 config_en:1;
+ u64 max_descs_per_engine:8;
+ u64 rsvd3:24;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET 0x10
+
+union wq_cap_reg {
+ struct {
+ u64 total_wq_size:16;
+ u64 num_wqs:8;
+ u64 rsvd:24;
+ u64 shared_mode:1;
+ u64 dedicated_mode:1;
+ u64 rsvd2:1;
+ u64 priority:1;
+ u64 occupancy:1;
+ u64 occupancy_int:1;
+ u64 rsvd3:10;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET 0x20
+
+union group_cap_reg {
+ struct {
+ u64 num_groups:8;
+ u64 total_tokens:8;
+ u64 token_en:1;
+ u64 token_limit:1;
+ u64 rsvd:46;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET 0x30
+
+union engine_cap_reg {
+ struct {
+ u64 num_engines:8;
+ u64 rsvd:56;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET 0x38
+
+#define IDXD_OPCAP_NOOP 0x0001
+#define IDXD_OPCAP_BATCH 0x0002
+#define IDXD_OPCAP_MEMMOVE 0x0008
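+/* Bit n of opcap advertises DSA opcode n (MEMMOVE is opcode 3, hence 0x0008). */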
+struct opcap {
+ u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET 0x40
+
+#define IDXD_TABLE_OFFSET 0x60
+union offsets_reg {
+ struct {
+ u64 grpcfg:16;
+ u64 wqcfg:16;
+ u64 msix_perm:16;
+ u64 ims:16;
+ u64 perfmon:16;
+ u64 rsvd:48;
+ };
+ u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET 0x80
+union gencfg_reg {
+ struct {
+ u32 token_limit:8;
+ u32 rsvd:4;
+ u32 user_int_en:1;
+ u32 rsvd2:19;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET 0x88
+union genctrl_reg {
+ struct {
+ u32 softerr_int_en:1;
+ u32 rsvd:31;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET 0x90
+union gensts_reg {
+ struct {
+ u32 state:2;
+ u32 reset_type:2;
+ u32 rsvd:28;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+ IDXD_DEVICE_STATE_DISABLED = 0,
+ IDXD_DEVICE_STATE_ENABLED,
+ IDXD_DEVICE_STATE_DRAIN,
+ IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+ IDXD_DEVICE_RESET_SOFTWARE = 0,
+ IDXD_DEVICE_RESET_FLR,
+ IDXD_DEVICE_RESET_WARM,
+ IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET 0x98
+#define IDXD_INTC_ERR 0x01
+#define IDXD_INTC_CMD 0x02
+#define IDXD_INTC_OCCUPY 0x04
+#define IDXD_INTC_PERFMON_OVFL 0x08
+
+#define IDXD_CMD_OFFSET 0xa0
+union idxd_command_reg {
+ struct {
+ u32 operand:20;
+ u32 cmd:5;
+ u32 rsvd:6;
+ u32 int_req:1;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_cmd {
+ IDXD_CMD_ENABLE_DEVICE = 1,
+ IDXD_CMD_DISABLE_DEVICE,
+ IDXD_CMD_DRAIN_ALL,
+ IDXD_CMD_ABORT_ALL,
+ IDXD_CMD_RESET_DEVICE,
+ IDXD_CMD_ENABLE_WQ,
+ IDXD_CMD_DISABLE_WQ,
+ IDXD_CMD_DRAIN_WQ,
+ IDXD_CMD_ABORT_WQ,
+ IDXD_CMD_RESET_WQ,
+ IDXD_CMD_DRAIN_PASID,
+ IDXD_CMD_ABORT_PASID,
+ IDXD_CMD_REQUEST_INT_HANDLE,
+};
+
+#define IDXD_CMDSTS_OFFSET 0xa8
+union cmdsts_reg {
+ struct {
+ u8 err;
+ u16 result;
+ u8 rsvd:7;
+ u8 active:1;
+ };
+ u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE 0x80000000
+
+enum idxd_cmdsts_err {
+ IDXD_CMDSTS_SUCCESS = 0,
+ IDXD_CMDSTS_INVAL_CMD,
+ IDXD_CMDSTS_INVAL_WQIDX,
+ IDXD_CMDSTS_HW_ERR,
+ /* enable device errors */
+ IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+ IDXD_CMDSTS_ERR_CONFIG,
+ IDXD_CMDSTS_ERR_BUSMASTER_EN,
+ IDXD_CMDSTS_ERR_PASID_INVAL,
+ IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+ IDXD_CMDSTS_ERR_GRP_CONFIG,
+ IDXD_CMDSTS_ERR_GRP_CONFIG2,
+ IDXD_CMDSTS_ERR_GRP_CONFIG3,
+ IDXD_CMDSTS_ERR_GRP_CONFIG4,
+ /* enable wq errors */
+ IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+ IDXD_CMDSTS_ERR_WQ_ENABLED,
+ IDXD_CMDSTS_ERR_WQ_SIZE,
+ IDXD_CMDSTS_ERR_WQ_PRIOR,
+ IDXD_CMDSTS_ERR_WQ_MODE,
+ IDXD_CMDSTS_ERR_BOF_EN,
+ IDXD_CMDSTS_ERR_PASID_EN,
+ IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+ IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+ /* disable device errors */
+ IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+ /* disable WQ, drain WQ, abort WQ, reset WQ */
+ IDXD_CMDSTS_ERR_DEV_NOT_EN,
+ /* request interrupt handle */
+ IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+ IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET 0xc0
+#define IDXD_SWERR_VALID 0x00000001
+#define IDXD_SWERR_OVERFLOW 0x00000002
+#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+ struct {
+ u64 valid:1;
+ u64 overflow:1;
+ u64 desc_valid:1;
+ u64 wq_idx_valid:1;
+ u64 batch:1;
+ u64 fault_rw:1;
+ u64 priv:1;
+ u64 rsvd:1;
+ u64 error:8;
+ u64 wq_idx:8;
+ u64 rsvd2:8;
+ u64 operation:8;
+ u64 pasid:20;
+ u64 rsvd3:4;
+
+ u64 batch_idx:16;
+ u64 rsvd4:16;
+ u64 invalid_flags:32;
+
+ u64 fault_addr;
+
+ u64 rsvd5;
+ };
+ u64 bits[4];
+} __packed;
+
+union msix_perm {
+ struct {
+ u32 rsvd:2;
+ u32 ignore:1;
+ u32 pasid_en:1;
+ u32 rsvd2:8;
+ u32 pasid:20;
+ };
+ u32 bits;
+} __packed;
+
+union group_flags {
+ struct {
+ u32 tc_a:3;
+ u32 tc_b:3;
+ u32 rsvd:1;
+ u32 use_token_limit:1;
+ u32 tokens_reserved:8;
+ u32 rsvd2:4;
+ u32 tokens_allowed:8;
+ u32 rsvd3:4;
+ };
+ u32 bits;
+} __packed;
+
+struct grpcfg {
+ u64 wqs[4];
+ u64 engines;
+ union group_flags flags;
+} __packed;
+
+union wqcfg {
+ struct {
+ /* bytes 0-3 */
+ u16 wq_size;
+ u16 rsvd;
+
+ /* bytes 4-7 */
+ u16 wq_thresh;
+ u16 rsvd1;
+
+ /* bytes 8-11 */
+ u32 mode:1; /* shared or dedicated */
+ u32 bof:1; /* block on fault */
+ u32 rsvd2:2;
+ u32 priority:4;
+ u32 pasid:20;
+ u32 pasid_en:1;
+ u32 priv:1;
+ u32 rsvd3:2;
+
+ /* bytes 12-15 */
+ u32 max_xfer_shift:5;
+ u32 max_batch_shift:4;
+ u32 rsvd4:23;
+
+ /* bytes 16-19 */
+ u16 occupancy_inth;
+ u16 occupancy_table_sel:1;
+ u16 rsvd5:15;
+
+ /* bytes 20-23 */
+ u16 occupancy_limit;
+ u16 occupancy_int_en:1;
+ u16 rsvd6:15;
+
+ /* bytes 24-27 */
+ u16 occupancy;
+ u16 occupancy_int:1;
+ u16 rsvd7:12;
+ u16 mode_support:1;
+ u16 wq_state:2;
+
+ /* bytes 28-31 */
+ u32 rsvd8;
+ };
+ u32 bits[8];
+} __packed;
+#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644
index 000000000000..45a0c5869a0a
--- /dev/null
+++ b/drivers/dma/idxd/submit.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+ struct idxd_desc *desc;
+ int idx;
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+
+ if (optype == IDXD_OP_BLOCK)
+ percpu_down_read(&wq->submit_lock);
+ else if (!percpu_down_read_trylock(&wq->submit_lock))
+ return ERR_PTR(-EBUSY);
+
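+ /*
+ * dq_count caps in-flight descriptors at wq->size. When the wq is
+ * full, a blocking submitter takes the write side of submit_lock,
+ * holding off new submitters (readers) while it waits for a slot
+ * to free up.
+ */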
+ if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+ int rc;
+
+ if (optype == IDXD_OP_NONBLOCK) {
+ percpu_up_read(&wq->submit_lock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ percpu_up_read(&wq->submit_lock);
+ percpu_down_write(&wq->submit_lock);
+ rc = wait_event_interruptible(wq->submit_waitq,
+ atomic_add_unless(&wq->dq_count,
+ 1, wq->size) ||
+ idxd->state != IDXD_DEV_ENABLED);
+ percpu_up_write(&wq->submit_lock);
+ if (rc < 0)
+ return ERR_PTR(-EINTR);
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+ } else {
+ percpu_up_read(&wq->submit_lock);
+ }
+
+ idx = sbitmap_get(&wq->sbmap, 0, false);
+ if (idx < 0) {
+ atomic_dec(&wq->dq_count);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ desc = wq->descs[idx];
+ memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+ memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ atomic_dec(&wq->dq_count);
+
+ sbitmap_clear_bit(&wq->sbmap, desc->id);
+ wake_up(&wq->submit_waitq);
+}
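+
+/*
+ * Typical usage (sketch): a submitter pairs the helpers above as
+ *
+ *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ *	... fill desc->hw ...
+ *	rc = idxd_submit_desc(wq, desc);
+ *	if (rc < 0)
+ *		idxd_free_desc(wq, desc);
+ *
+ * On success the descriptor is instead freed by the completion path
+ * (see irq.c) once the hardware writes its completion record.
+ */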
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int vec = desc->hw->int_handle;
+ void __iomem *portal;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -EIO;
+
+ portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+ /*
+ * The wmb() flushes writes to coherent DMA data before possibly
+ * triggering a DMA read. The wmb() is necessary even on UP because
+ * the recipient is a device.
+ */
+ wmb();
+ iosubmit_cmds512(portal, desc->hw, 1);
+
+ /*
+ * Add the descriptor to the lockless pending list of the irq_entry
+ * that the descriptor was assigned to.
+ */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+ llist_add(&desc->llnode,
+ &idxd->irq_entries[vec].pending_llist);
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644
index 000000000000..849c50ab939a
--- /dev/null
+++ b/drivers/dma/idxd/sysfs.c
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+ [IDXD_WQT_NONE] = "none",
+ [IDXD_WQT_KERNEL] = "kernel",
+ [IDXD_WQT_USER] = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+ dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL &&
+ strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER;
+}
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ int matched = 0;
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY)
+ return 0;
+ matched = 1;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state < IDXD_DEV_CONF_READY)
+ return 0;
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+ return 0;
+ }
+ matched = 1;
+ }
+
+ if (matched)
+ dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+ return matched;
+}
+
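+/*
+ * Bind handler for the dsa bus. Binding a device node configures and
+ * enables the whole device and registers it with dmaengine; binding a
+ * wq node allocates its resources, enables it, maps its portal, and
+ * exposes it as a dmaengine channel or a cdev depending on wq type.
+ */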
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY) {
+ dev_warn(dev, "Device not ready for config\n");
+ return -EBUSY;
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENXIO;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+
+ /* Perform IDXD configuration and enabling */
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device config failed: %d\n", rc);
+ return rc;
+ }
+
+ /* start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device enable failed: %d\n", rc);
+ return rc;
+ }
+
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_dbg(dev, "Failed to register dmaengine device\n");
+ return rc;
+ }
+ return 0;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ dev_warn(dev, "IDXD wq disable failed\n");
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+
+ wq->client_count = 0;
+
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+ if (wq->state == IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ return;
+ }
+
+ if (is_idxd_wq_dmaengine(wq))
+ idxd_unregister_dma_channel(wq);
+ else if (is_idxd_wq_cdev(wq))
+ idxd_wq_del_cdev(wq);
+
+ if (idxd_wq_refcount(wq))
+ dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_wq_disable(wq);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ idxd_wq_free_resources(wq);
+ wq->client_count = 0;
+ mutex_unlock(&wq->wq_lock);
+
+ if (rc < 0)
+ dev_warn(dev, "Failed to disable %s: %d\n",
+ dev_name(&wq->conf_dev), rc);
+ else
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+ /* disable workqueue here */
+ if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ disable_wq(wq);
+ } else if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+ int i;
+
+ dev_dbg(dev, "%s removing dev %s\n", __func__,
+ dev_name(&idxd->conf_dev));
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i,
+ dev_name(&idxd->conf_dev));
+ device_release_driver(&wq->conf_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
+ if (rc < 0)
+ dev_warn(dev, "Device disable failed\n");
+ else
+ dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+ }
+
+ return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+ dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
+
+static struct bus_type *idxd_bus_types[] = {
+ &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+ .drv = {
+ .name = "dsa",
+ .bus = &dsa_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+ &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+ return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ return &dsa_device_type;
+ else
+ return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = driver_register(&idxd_drvs[i]->drv);
+ if (rc < 0)
+ goto drv_fail;
+ }
+
+ return 0;
+
+drv_fail:
+ while (--i >= 0)
+ driver_unregister(&idxd_drvs[i]->drv);
+ return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+
+ if (engine->group)
+ return sprintf(buf, "%d\n", engine->group->id);
+ else
+ return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_device *idxd = engine->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (engine->group) {
+ engine->group->num_engines--;
+ engine->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = engine->group;
+
+ if (prevg)
+ prevg->num_engines--;
+ engine->group = &idxd->groups[id];
+ engine->group->num_engines++;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+ __ATTR(group_id, 0644, engine_group_id_show,
+ engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+ &dev_attr_engine_group.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+ .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ &idxd_engine_attribute_group,
+ NULL,
+};
+
+/* Group attributes */
+
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+ int i, tokens;
+
+ for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *g = &idxd->groups[i];
+
+ tokens += g->tokens_reserved;
+ }
+
+ idxd->nr_tokens = idxd->max_tokens - tokens;
+}
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ if (val > idxd->max_tokens)
+ return -EINVAL;
+
+ if (val > idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_reserved = val;
+ idxd_set_free_tokens(idxd);
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+ __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+ group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+ if (val < 4 * group->num_engines ||
+ val > group->tokens_reserved + idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_allowed = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+ __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+ group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ group->use_token_limit = !!val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+ __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+ group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ if (!engine->group)
+ continue;
+
+ if (engine->group->id == group->id)
+ rc += sprintf(tmp + rc, "engine%d.%d ",
+ idxd->id, engine->id);
+ }
+
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+ __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (!wq->group)
+ continue;
+
+ if (wq->group->id == group->id)
+ rc += sprintf(tmp + rc, "wq%d.%d ",
+ idxd->id, wq->id);
+ }
+
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+ __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_a = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+ __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+ group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_b = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+ __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+ group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+ &dev_attr_group_work_queues.attr,
+ &dev_attr_group_engines.attr,
+ &dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_traffic_class_a.attr,
+ &dev_attr_group_traffic_class_b.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+ .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+ &idxd_group_attribute_group,
+ NULL,
+};
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+ __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->state) {
+ case IDXD_WQ_DISABLED:
+ return sprintf(buf, "disabled\n");
+ case IDXD_WQ_ENABLED:
+ return sprintf(buf, "enabled\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+ __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->group)
+ return sprintf(buf, "%u\n", wq->group->id);
+ else
+ return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (wq->group) {
+ wq->group->num_wqs--;
+ wq->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = wq->group;
+
+ if (prevg)
+ prevg->num_wqs--;
+ wq->group = group;
+ group->num_wqs++;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+ __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n",
+ wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (sysfs_streq(buf, "dedicated")) {
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+ __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long size;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &size);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (size > idxd->max_wq_size)
+ return -EINVAL;
+
+ wq->size = size;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+ __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long prio;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &prio);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (prio > IDXD_MAX_PRIORITY)
+ return -EINVAL;
+
+ wq->priority = prio;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+ __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->type) {
+ case IDXD_WQT_KERNEL:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ case IDXD_WQT_USER:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_USER]);
+ case IDXD_WQT_NONE:
+ default:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_NONE]);
+ }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ enum idxd_wq_type old_type;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ old_type = wq->type;
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ wq->type = IDXD_WQT_KERNEL;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+ wq->type = IDXD_WQT_USER;
+ else
+ wq->type = IDXD_WQT_NONE;
+
+ /* If we are changing queue type, clear the name */
+ if (wq->type != old_type)
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+ __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+ return -EINVAL;
+
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strncpy(wq->name, buf, WQ_NAME_SIZE);
+ strreplace(wq->name, '\n', '\0');
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+ __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+ __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+ &dev_attr_wq_clients.attr,
+ &dev_attr_wq_state.attr,
+ &dev_attr_wq_group_id.attr,
+ &dev_attr_wq_mode.attr,
+ &dev_attr_wq_size.attr,
+ &dev_attr_wq_priority.attr,
+ &dev_attr_wq_type.attr,
+ &dev_attr_wq_name.attr,
+ &dev_attr_wq_cdev_minor.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+ .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ &idxd_wq_attribute_group,
+ NULL,
+};
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long flags;
+ int count = 0, i;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ count += wq->client_count;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ switch (idxd->state) {
+ case IDXD_DEV_DISABLED:
+ case IDXD_DEV_CONF_READY:
+ return sprintf(buf, "disabled\n");
+ case IDXD_DEV_ENABLED:
+ return sprintf(buf, "enabled\n");
+ case IDXD_DEV_HALTED:
+ return sprintf(buf, "halted\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ int i, out = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < 4; i++)
+ out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ out--;
+ out += sprintf(buf + out, "\n");
+ return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
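+/*
+ * The token limit can only be changed while the device is disabled and
+ * the hardware reports a configurable limit; the value is bounded by the
+ * total tokens advertised in the group capability register.
+ */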
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (!idxd->hw.group_cap.token_limit)
+ return -EPERM;
+
+ if (val > idxd->hw.group_cap.total_tokens)
+ return -EINVAL;
+
+ idxd->token_limit = val;
+ return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_max_groups.attr,
+ &dev_attr_max_work_queues.attr,
+ &dev_attr_max_work_queues_size.attr,
+ &dev_attr_max_engines.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_max_batch_size.attr,
+ &dev_attr_max_transfer_size.attr,
+ &dev_attr_op_cap.attr,
+ &dev_attr_configurable.attr,
+ &dev_attr_clients.attr,
+ &dev_attr_state.attr,
+ &dev_attr_errors.attr,
+ &dev_attr_max_tokens.attr,
+ &dev_attr_token_limit.attr,
+ &dev_attr_cdev_major.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+ .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+ &idxd_device_attribute_group,
+ NULL,
+};
+
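+/*
+ * Register one conf_dev per engine under the idxd parent device. On a
+ * device_register() failure the reference is dropped with put_device()
+ * and every engine registered so far is unwound.
+ */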
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ engine->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&engine->conf_dev, "engine%d.%d",
+ idxd->id, engine->id);
+ engine->conf_dev.bus = idxd_get_bus_type(idxd);
+ engine->conf_dev.groups = idxd_engine_attribute_groups;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ dev_dbg(dev, "Engine device register: %s\n",
+ dev_name(&engine->conf_dev));
+ rc = device_register(&engine->conf_dev);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ group->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&group->conf_dev, "group%d.%d",
+ idxd->id, group->id);
+ group->conf_dev.bus = idxd_get_bus_type(idxd);
+ group->conf_dev.groups = idxd_group_attribute_groups;
+ group->conf_dev.type = &idxd_group_device_type;
+ dev_dbg(dev, "Group device register: %s\n",
+ dev_name(&group->conf_dev));
+ rc = device_register(&group->conf_dev);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ wq->conf_dev.bus = idxd_get_bus_type(idxd);
+ wq->conf_dev.groups = idxd_wq_attribute_groups;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ dev_dbg(dev, "WQ device register: %s\n",
+ dev_name(&wq->conf_dev));
+ rc = device_register(&wq->conf_dev);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ char devname[IDXD_NAME_SIZE];
+
+ sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ idxd->conf_dev.parent = dev;
+ dev_set_name(&idxd->conf_dev, "%s", devname);
+ idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+ idxd->conf_dev.groups = idxd_attribute_groups;
+ idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+ dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+ rc = device_register(&idxd->conf_dev);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+
+ rc = idxd_setup_device_sysfs(idxd);
+ if (rc < 0) {
+ dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_wq_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_group_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_engine_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+
+ device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = bus_register(idxd_bus_types[i]);
+ if (rc < 0)
+ goto bus_err;
+ }
+
+ return 0;
+
+bus_err:
+ while (--i >= 0)
+ bus_unregister(idxd_bus_types[i]);
+ return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ bus_unregister(idxd_bus_types[i]);
+}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c27e206a764c..066b21a32232 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
return;
}
sdmac->desc = desc = to_sdma_desc(&vd->tx);
- /*
- * Do not delete the node in desc_issued list in cyclic mode, otherwise
- * the desc allocated will never be freed in vchan_dma_desc_free_list
- */
- if (!(sdmac->flags & IMX_DMA_SG_LOOP))
- list_del(&vd->node);
+
+ list_del(&vd->node);
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
- sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
}
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
sdma_disable_channel(chan);
- if (sdmac->desc)
+ if (sdmac->desc) {
+ vchan_terminate_vdesc(&sdmac->desc->vd);
+ sdmac->desc = NULL;
schedule_work(&sdmac->terminate_worker);
+ }
+
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}
@@ -1324,7 +1327,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel_async(chan);
+ sdma_terminate_all(chan);
sdma_channel_synchronize(chan);
@@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_desc *desc;
+ struct sdma_desc *desc = NULL;
u32 residue;
struct virt_dma_desc *vd;
enum dma_status ret;
@@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&sdmac->vc.lock, flags);
+
vd = vchan_find_desc(&sdmac->vc, cookie);
- if (vd) {
+ if (vd)
desc = to_sdma_desc(&vd->tx);
+ else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+ desc = sdmac->desc;
+
+ if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP)
residue = (desc->num_bd - desc->buf_ptail) *
desc->period_len - desc->chn_real_count;
else
residue = desc->chn_count - desc->chn_real_count;
- } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
- residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
} else {
residue = 0;
}
+
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
@@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index a6a6dc432db8..60e9afbb896c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
ioat_kobject_del(ioat_dma);
dma_async_device_unregister(dma);
-
- dma_pool_destroy(ioat_dma->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
}
/**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
- ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+ ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
return;
ioat_stop(ioat_chan);
- ioat_reset_hw(ioat_chan);
- /* Put LTR to idle */
- if (ioat_dma->version >= IOAT_VER_3_4)
- writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
- ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+ ioat_reset_hw(ioat_chan);
+
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
.err_handler = &ioat_err_handler,
};
+static void release_ioatdma(struct dma_device *device)
+{
+ struct ioatdma_device *d = to_ioatdma_device(device);
+ int i;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(d->idx[i]);
+
+ dma_pool_destroy(d->completion_pool);
+ kfree(d);
+}
+
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
- struct device *dev = &pdev->dev;
- struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->pdev = pdev;
d->reg_base = iobase;
+ d->dma_dev.device_release = release_ioatdma;
return d;
}
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
+ ioat_shutdown(pdev);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c20e6bd4e298..29f1223b285a 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head);
- vchan_dma_desc_free_list(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index c2d779daa4b5..b2c2b5e8093c 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_dma.h>
+#include "dmaengine.h"
+
static LIST_HEAD(of_dma_list);
static DEFINE_MUTEX(of_dma_lock);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 023f951189a7..c683051257fd 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
}
vchan_get_all_descriptors(&vchan->vc, &head);
- vchan_dma_desc_free_list(&vchan->vc, &head);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6cce9ef61b29..88b884cbb7c1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
- pm_runtime_disable(dev);
-
- if (!pm_runtime_status_suspended(dev)) {
- /* amba did not disable the clock */
- amba_pclk_disable(pcdev);
- }
+ pm_runtime_force_suspend(dev);
amba_pclk_unprepare(pcdev);
return 0;
@@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev)
if (ret)
return ret;
- if (!pm_runtime_status_suspended(dev))
- ret = amba_pclk_enable(pcdev);
-
- pm_runtime_enable(dev);
+ pm_runtime_force_resume(dev);
return ret;
}
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
new file mode 100644
index 000000000000..db4c5fd453a9
--- /dev/null
+++ b/drivers/dma/plx_dma.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLX ExpressLane PEX PCI Switch DMA Engine Driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR 0x214
+#define PLX_REG_DESC_RING_ADDR_HI 0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C
+#define PLX_REG_DESC_RING_COUNT 0x220
+#define PLX_REG_DESC_RING_LAST_ADDR 0x224
+#define PLX_REG_DESC_RING_LAST_SIZE 0x228
+#define PLX_REG_PREF_LIMIT 0x234
+#define PLX_REG_CTRL 0x238
+#define PLX_REG_CTRL2 0x23A
+#define PLX_REG_INTR_CTRL 0x23C
+#define PLX_REG_INTR_STATUS 0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR 8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0)
+#define PLX_REG_CTRL_ABORT BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2)
+#define PLX_REG_CTRL_START BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+ PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+ PLX_REG_CTRL_ABORT_DONE | \
+ PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+ PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+ PLX_REG_CTRL_START | \
+ PLX_REG_CTRL_RESET_VAL)
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7
+
+#define PLX_REG_INTR_CTRL_ERROR_EN BIT(0)
+#define PLX_REG_INTR_CTRL_INV_DESC_EN BIT(1)
+#define PLX_REG_INTR_CTRL_ABORT_DONE_EN BIT(3)
+#define PLX_REG_INTR_CTRL_PAUSE_DONE_EN BIT(4)
+#define PLX_REG_INTR_CTRL_IMM_PAUSE_DONE_EN BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2)
+#define PLX_REG_INTR_STATUS_ABORT_DONE BIT(3)
+
+struct plx_dma_hw_std_desc {
+ __le32 flags_and_size;
+ __le16 dst_addr_hi;
+ __le16 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK 0x7ffffff
+#define PLX_DESC_FLAG_VALID BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30)
+
+#define PLX_DESC_WB_SUCCESS BIT(30)
+#define PLX_DESC_WB_RD_FAIL BIT(29)
+#define PLX_DESC_WB_WR_FAIL BIT(28)
+
+#define PLX_DMA_RING_COUNT 2048
+
+struct plx_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ struct plx_dma_hw_std_desc *hw;
+ u32 orig_size;
+};
+
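+/*
+ * pdev is RCU-protected: plx_dma_remove() clears it and waits out a grace
+ * period, so paths that touch the hardware check it under rcu_read_lock()
+ * before poking registers.
+ */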
+struct plx_dma_dev {
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ struct pci_dev __rcu *pdev;
+ void __iomem *bar;
+ struct tasklet_struct desc_task;
+
+ spinlock_t ring_lock;
+ bool ring_active;
+ int head;
+ int tail;
+ struct plx_dma_hw_std_desc *hw_ring;
+ dma_addr_t hw_ring_dma;
+ struct plx_dma_desc **desc_ring;
+};
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+ return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct plx_dma_desc, txd);
+}
+
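+/*
+ * PLX_DMA_RING_COUNT is a power of two, so masking the free-running
+ * head/tail indices wraps them into the ring.
+ */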
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
+ return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
+
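+/*
+ * Reap finished descriptors: the hardware clears PLX_DESC_FLAG_VALID in
+ * flags_and_size when it retires a descriptor and writes back the
+ * completion status and transferred size, from which the residue is
+ * derived.
+ */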
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+ u32 flags;
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
+ if (flags & PLX_DESC_FLAG_VALID)
+ break;
+
+ res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+ if (flags & PLX_DESC_WB_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (flags & PLX_DESC_WB_WR_FAIL)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else
+ res.result = DMA_TRANS_READ_FAILED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+
+ plx_dma_process_desc(plxdev);
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ res.residue = desc->orig_size;
+ res.result = DMA_TRANS_ABORTED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+ return;
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ while (!time_after(jiffies, timeout)) {
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+ break;
+
+ cpu_relax();
+ }
+
+ if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+ dev_err(plxdev->dma_dev.dev,
+ "Timeout waiting for graceful pause!\n");
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ __plx_dma_stop(plxdev);
+
+ rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+ struct plx_dma_dev *plxdev = (void *)data;
+
+ plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+ __acquires(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+ struct plx_dma_desc *plxdesc;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ if (!plxdev->ring_active)
+ goto err_unlock;
+
+ if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+ goto err_unlock;
+
+ if (len > PLX_DESC_SIZE_MASK)
+ goto err_unlock;
+
+ plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+ plxdev->head++;
+
+ plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+ plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+ plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+ plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+ plxdesc->orig_size = len;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+ plxdesc->hw->flags_and_size = cpu_to_le32(len);
+ plxdesc->txd.flags = flags;
+
+ /* return with the lock held, it will be released in tx_submit */
+
+ return &plxdesc->txd;
+
+err_unlock:
+ /*
+ * Keep sparse happy by restoring an even lock count on
+ * this lock.
+ */
+ __acquire(plxdev->ring_lock);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+ return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+ __releases(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+ struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(desc);
+
+ /*
+ * Ensure the descriptor updates are visible to the dma device
+ * before setting the valid bit.
+ */
+ wmb();
+
+ plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ return cookie;
+}
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ plx_dma_process_desc(plxdev);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /*
+ * Ensure the valid bits are visible before starting the
+ * DMA engine.
+ */
+ wmb();
+
+ writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+ rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+ struct plx_dma_dev *plxdev = devid;
+ u32 status;
+
+ status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+ tasklet_schedule(&plxdev->desc_task);
+
+ writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+ struct plx_dma_desc *desc;
+ int i;
+
+ plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+ sizeof(*plxdev->desc_ring), GFP_KERNEL);
+ if (!plxdev->desc_ring)
+ return -ENOMEM;
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ goto free_and_exit;
+
+ dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+ desc->txd.tx_submit = plx_dma_tx_submit;
+ desc->hw = &plxdev->hw_ring[i];
+
+ plxdev->desc_ring[i] = desc;
+ }
+
+ return 0;
+
+free_and_exit:
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+ kfree(plxdev->desc_ring);
+ return -ENOMEM;
+}
+
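+/*
+ * Allocate the coherent hardware descriptor ring and the parallel
+ * software descriptor ring, then program the ring registers and mark the
+ * ring active. Returns the ring size as the descriptor count on success.
+ */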
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ int rc;
+
+ plxdev->head = plxdev->tail = 0;
+ plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+ &plxdev->hw_ring_dma, GFP_KERNEL);
+ if (!plxdev->hw_ring)
+ return -ENOMEM;
+
+ rc = plx_dma_alloc_desc(plxdev);
+ if (rc)
+ goto out_free_hw_ring;
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ rc = -ENODEV;
+ goto out_free_hw_ring;
+ }
+
+ writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(upper_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+ writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+ plxdev->ring_active = true;
+
+ rcu_read_unlock();
+
+ return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+ return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ struct pci_dev *pdev;
+ int irq = -1;
+ int i;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ plx_dma_stop(plxdev);
+
+ rcu_read_lock();
+ pdev = rcu_dereference(plxdev->pdev);
+ if (pdev)
+ irq = pci_irq_vector(pdev, 0);
+ rcu_read_unlock();
+
+ if (irq > 0)
+ synchronize_irq(irq);
+
+ tasklet_kill(&plxdev->desc_task);
+
+ plx_dma_abort_desc(plxdev);
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+
+ kfree(plxdev->desc_ring);
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+ struct plx_dma_dev *plxdev =
+ container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+ put_device(dma_dev->dev);
+ kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev;
+ struct dma_device *dma;
+ struct dma_chan *chan;
+ int rc;
+
+ plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+ if (!plxdev)
+ return -ENOMEM;
+
+ rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ KBUILD_MODNAME, plxdev);
+ if (rc) {
+ kfree(plxdev);
+ return rc;
+ }
+
+ spin_lock_init(&plxdev->ring_lock);
+ tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+ (unsigned long)plxdev);
+
+ RCU_INIT_POINTER(plxdev->pdev, pdev);
+ plxdev->bar = pcim_iomap_table(pdev)[0];
+
+ dma = &plxdev->dma_dev;
+ dma->chancnt = 1;
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma->dev = get_device(&pdev->dev);
+
+ dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = plx_dma_free_chan_resources;
+ dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+ dma->device_issue_pending = plx_dma_issue_pending;
+ dma->device_tx_status = plx_dma_tx_status;
+ dma->device_release = plx_dma_release;
+
+ chan = &plxdev->dma_chan;
+ chan->device = dma;
+ dma_cookie_init(chan);
+ list_add_tail(&chan->device_node, &dma->channels);
+
+ rc = dma_async_device_register(dma);
+ if (rc) {
+ pci_err(pdev, "Failed to register dma device: %d\n", rc);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+ kfree(plxdev);
+ return rc;
+ }
+
+ pci_set_drvdata(pdev, plxdev);
+
+ return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (rc <= 0)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = plx_dma_create(pdev);
+ if (rc)
+ goto err_free_irq_vectors;
+
+ pci_info(pdev, "PLX DMA Channel Registered\n");
+
+ return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+ rcu_assign_pointer(plxdev->pdev, NULL);
+ synchronize_rcu();
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ __plx_dma_stop(plxdev);
+ plx_dma_abort_desc(plxdev);
+
+ plxdev->bar = NULL;
+ dma_async_device_unregister(&plxdev->dma_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x87D0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_SYSTEM_OTHER << 8,
+ .class_mask = 0xFFFFFFFF,
+ },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = plx_dma_pci_tbl,
+ .probe = plx_dma_probe,
+ .remove = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 43da8eeb18ef..8e14c72d03f0 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
s3c24xx_dma_start_next_sg(s3cchan, txd);
}
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
- struct s3c24xx_dma_chan *s3cchan)
-{
- LIST_HEAD(head);
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
/*
* Try to allocate a physical channel. When successful, assign it to
* this virtual channel, and initiate the next descriptor. The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ LIST_HEAD(head);
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&s3cchan->vc.lock, flags);
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs not yet fired as well */
- s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+ return 0;
+
unlock:
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
@@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
/* Basic sanity check */
if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+ dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
pdata->num_phy_channels, MAX_DMA_CHANNELS);
return -EINVAL;
}
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 465256fe8b1f..6d0bec947636 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
kfree(chan->desc);
chan->desc = NULL;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
sf_pdma_disclaim_chan(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
}
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
chan->desc = NULL;
chan->xfer_err = false;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e397a50058c8..bbc2bda3b902 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dma_addr_t src, dest;
u32 endpoints;
int nr_periods, offset, plength, i;
+ u8 ram_type, io_mode, linear_mode;
if (!is_slave_direction(dir)) {
dev_err(chan2dev(chan), "Invalid DMA direction\n");
return NULL;
}
- if (vchan->is_dedicated) {
- /*
- * As we are using this just for audio data, we need to use
- * normal DMA. There is nothing stopping us from supporting
- * dedicated DMA here as well, so if a client comes up and
- * requires it, it will be simple to implement it.
- */
- dev_err(chan2dev(chan),
- "Cyclic transfers are only supported on Normal DMA\n");
- return NULL;
- }
-
contract = generate_dma_contract();
if (!contract)
return NULL;
contract->is_cyclic = 1;
- /* Figure out the endpoints and the address we need */
+ if (vchan->is_dedicated) {
+ io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ } else {
+ io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ }
+
if (dir == DMA_MEM_TO_DEV) {
src = buf;
dest = sconfig->dst_addr;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
} else {
src = sconfig->src_addr;
dest = buf;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
/*
@@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dest = buf + offset;
/* Make the promise */
- promise = generate_ndma_promise(chan, src, dest,
- plength, sconfig, dir);
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, src, dest,
+ plength, sconfig);
+ else
+ promise = generate_ndma_promise(chan, src, dest,
+ plength, sconfig, dir);
+
if (!promise) {
/* TODO: should we free everything? */
return NULL;
@@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
}
spin_lock_irqsave(&vchan->vc.lock, flags);
- vchan_dma_desc_free_list(&vchan->vc, &head);
/* Clear these so the vchan is usable again */
vchan->processing = NULL;
vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index d507c24fbf31..f76e06651f80 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -34,5 +34,29 @@ config DMA_OMAP
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
+config TI_K3_UDMA
+ bool "Texas Instruments UDMA support"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_INTA_IRQCHIP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_K3_RINGACC
+ select TI_K3_PSIL
+ help
+ Enable support for the TI UDMA (Unified DMA) controller. This
+ DMA engine is used in AM65x and j721e.
+
+config TI_K3_UDMA_GLUE_LAYER
+ bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_K3_UDMA
+ help
+ Say y here to support the K3 NAVSS DMA glue interface.
+ If unsure, say N.
+
+config TI_K3_PSIL
+ bool
+
config TI_DMA_CROSSBAR
bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 113e59ec9c32..9a29a107e374 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -2,4 +2,7 @@
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 756a3c951dc7..03a7f647f7b2 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ecc);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
/* Get eDMA3 configuration from IP */
ret = edma_setup_from_hw(dev, info, ecc);
if (ret)
- return ret;
+ goto err_disable_pm;
/* Allocate memory based on the information we got from the IP */
ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
sizeof(*ecc->slave_chans), GFP_KERNEL);
- if (!ecc->slave_chans)
- return -ENOMEM;
ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->slot_inuse)
- return -ENOMEM;
ecc->channels_mask = devm_kcalloc(dev,
BITS_TO_LONGS(ecc->num_channels),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->channels_mask)
- return -ENOMEM;
+ if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
/* Mark all channels available initially */
bitmap_fill(ecc->channels_mask, ecc->num_channels);
@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccint = irq;
}
@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccerrint = irq;
}
@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(dev, "Can't allocate PaRAM dummy slot\n");
- return ecc->dummy_slot;
+ ret = ecc->dummy_slot;
+ goto err_disable_pm;
}
queue_priority_mapping = info->queue_priority_mapping;
@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
err_reg1:
edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
if (ecc->dma_memcpy)
dma_async_device_unregister(ecc->dma_memcpy);
edma_free_slot(ecc, ecc->dummy_slot);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return 0;
}
diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c
new file mode 100644
index 000000000000..a896a15908cf
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am654.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
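+/*
+ * Each PSIL_* macro below expands to a struct psil_ep entry: the PSI-L
+ * thread ID plus the endpoint configuration the UDMA driver needs
+ * (endpoint type, packet mode, EPIB presence and protocol specific data
+ * size).
+ */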
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0x4300),
+ PSIL_ETHERNET(0x4301),
+ PSIL_ETHERNET(0x4302),
+ PSIL_ETHERNET(0x4303),
+ /* PDMA0 - McASPs */
+ PSIL_PDMA_XY_TR(0x4400),
+ PSIL_PDMA_XY_TR(0x4401),
+ PSIL_PDMA_XY_TR(0x4402),
+ /* PDMA1 - SPI0-4 */
+ PSIL_PDMA_XY_PKT(0x4500),
+ PSIL_PDMA_XY_PKT(0x4501),
+ PSIL_PDMA_XY_PKT(0x4502),
+ PSIL_PDMA_XY_PKT(0x4503),
+ PSIL_PDMA_XY_PKT(0x4504),
+ PSIL_PDMA_XY_PKT(0x4505),
+ PSIL_PDMA_XY_PKT(0x4506),
+ PSIL_PDMA_XY_PKT(0x4507),
+ PSIL_PDMA_XY_PKT(0x4508),
+ PSIL_PDMA_XY_PKT(0x4509),
+ PSIL_PDMA_XY_PKT(0x450a),
+ PSIL_PDMA_XY_PKT(0x450b),
+ PSIL_PDMA_XY_PKT(0x450c),
+ PSIL_PDMA_XY_PKT(0x450d),
+ PSIL_PDMA_XY_PKT(0x450e),
+ PSIL_PDMA_XY_PKT(0x450f),
+ PSIL_PDMA_XY_PKT(0x4510),
+ PSIL_PDMA_XY_PKT(0x4511),
+ PSIL_PDMA_XY_PKT(0x4512),
+ PSIL_PDMA_XY_PKT(0x4513),
+ /* PDMA1 - USART0-2 */
+ PSIL_PDMA_XY_PKT(0x4514),
+ PSIL_PDMA_XY_PKT(0x4515),
+ PSIL_PDMA_XY_PKT(0x4516),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 - ADCs */
+ PSIL_PDMA_XY_TR(0x7100),
+ PSIL_PDMA_XY_TR(0x7101),
+ PSIL_PDMA_XY_TR(0x7102),
+ PSIL_PDMA_XY_TR(0x7103),
+ /* MCU_PDMA1 - MCU_SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ PSIL_PDMA_XY_PKT(0x7208),
+ PSIL_PDMA_XY_PKT(0x7209),
+ PSIL_PDMA_XY_PKT(0x720a),
+ PSIL_PDMA_XY_PKT(0x720b),
+ /* MCU_PDMA1 - MCU_USART0 */
+ PSIL_PDMA_XY_PKT(0x7212),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0xc300),
+ PSIL_ETHERNET(0xc301),
+ PSIL_ETHERNET(0xc302),
+ PSIL_ETHERNET(0xc303),
+ PSIL_ETHERNET(0xc304),
+ PSIL_ETHERNET(0xc305),
+ PSIL_ETHERNET(0xc306),
+ PSIL_ETHERNET(0xc307),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+ .name = "am654",
+ .src = am654_src_ep_map,
+ .src_count = ARRAY_SIZE(am654_src_ep_map),
+ .dst = am654_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
new file mode 100644
index 000000000000..e3cfd5f66842
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
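+/* McASP endpoints additionally request 32-bit accesses and burst mode. */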
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ PSIL_PDMA_MCASP(0x4503),
+ PSIL_PDMA_MCASP(0x4504),
+ PSIL_PDMA_MCASP(0x4505),
+ PSIL_PDMA_MCASP(0x4506),
+ PSIL_PDMA_MCASP(0x4507),
+ PSIL_PDMA_MCASP(0x4508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x4630),
+ PSIL_PDMA_XY_PKT(0x463a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW9 */
+ PSIL_ETHERNET(0x4a00),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* CPSW9 */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+ .name = "j721e",
+ .src = j721e_src_ep_map,
+ .src_count = ARRAY_SIZE(j721e_src_ep_map),
+ .dst = j721e_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
new file mode 100644
index 000000000000..a1f389ca371e
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+ u32 thread_id;
+ struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name: Name of the map, set it to the name of the SoC
+ * @src: Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst: Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of a symmetric configuration for a matching src/dst thread pair
+ * (for example 0x4400 and 0xc400) only the src configuration needs to be
+ * present. If no dst configuration is found, the code will look up
+ * (dst_thread_id & ~0x8000) to find the symmetric match.
+ */
+struct psil_ep_map {
+ char *name;
+ struct psil_ep *src;
+ int src_count;
+ struct psil_ep *dst;
+ int dst_count;
+};
+
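+/*
+ * Example of the symmetric matching described above (assuming the
+ * destination flag bit is 0x8000, i.e. K3_PSIL_DST_THREAD_ID_OFFSET):
+ * a lookup for dst thread 0xc400 that has no entry in the dst array is
+ * retried as (0xc400 & ~0x8000) == 0x4400 against the src array.
+ */
+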
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 000000000000..d7b965049ccb
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+ int i;
+
+ mutex_lock(&ep_map_mutex);
+ if (!soc_ep_map) {
+ if (of_machine_is_compatible("ti,am654")) {
+ soc_ep_map = &am654_ep_map;
+ } else if (of_machine_is_compatible("ti,j721e")) {
+ soc_ep_map = &j721e_ep_map;
+ } else {
+ pr_err("PSIL: No compatible machine found for map\n");
+ mutex_unlock(&ep_map_mutex);
+ return ERR_PTR(-ENOTSUPP);
+ }
+ pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+ }
+ mutex_unlock(&ep_map_mutex);
+
+ if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+ /* check in destination thread map */
+ for (i = 0; i < soc_ep_map->dst_count; i++) {
+ if (soc_ep_map->dst[i].thread_id == thread_id)
+ return &soc_ep_map->dst[i].ep_config;
+ }
+ }
+
+ thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+ if (soc_ep_map->src) {
+ for (i = 0; i < soc_ep_map->src_count; i++) {
+ if (soc_ep_map->src[i].thread_id == thread_id)
+ return &soc_ep_map->src[i].ep_config;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config)
+{
+ struct psil_endpoint_config *dst_ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int index;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ index = of_property_match_string(dev->of_node, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+ index, &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ dst_ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(dst_ep_config)) {
+ pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+ thread_id);
+ of_node_put(dma_spec.np);
+ return PTR_ERR(dst_ep_config);
+ }
+
+ memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+ of_node_put(dma_spec.np);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644
index 000000000000..c1511298ece2
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+ struct device *dev;
+ struct udma_dev *udmax;
+ const struct udma_tisci_rm *tisci_rm;
+ struct k3_ringacc *ringacc;
+ u32 src_thread;
+ u32 dst_thread;
+
+ u32 hdesc_size;
+ bool epib;
+ u32 psdata_size;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_tchan *udma_tchanx;
+ int udma_tchan_id;
+
+ struct k3_ring *ringtx;
+ struct k3_ring *ringtxcq;
+
+ bool psil_paired;
+
+ int virq;
+
+ atomic_t free_pkts;
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+ struct udma_rflow *udma_rflow;
+ int udma_rflow_id;
+ struct k3_ring *ringrx;
+ struct k3_ring *ringrxfdq;
+
+ int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_rchan *udma_rchanx;
+ int udma_rchan_id;
+ bool remote;
+
+ bool psil_paired;
+
+ u32 swdata_size;
+ int flow_id_base;
+
+ struct k3_udma_glue_rx_flow *flows;
+ u32 flow_num;
+ u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+ struct k3_udma_glue_common *common)
+{
+ common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+ "ti,ringacc");
+ if (IS_ERR(common->ringacc))
+ return PTR_ERR(common->ringacc);
+
+ common->udmax = of_xudma_dev_get(udmax_np, NULL);
+ if (IS_ERR(common->udmax))
+ return PTR_ERR(common->udmax);
+
+ common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+ return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+ const char *name, struct k3_udma_glue_common *common,
+ bool tx_chn)
+{
+ struct psil_endpoint_config *ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int ret = 0;
+ int index;
+
+ if (unlikely(!name))
+ return -EINVAL;
+
+ index = of_property_match_string(chn_np, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ /* get psil endpoint config */
+ ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ ret = PTR_ERR(ep_config);
+ goto out_put_spec;
+ }
+
+ common->epib = ep_config->needs_epib;
+ common->psdata_size = ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ struct device *dev = tx_chn->common.dev;
+
+ dev_dbg(dev, "dump_tx_chn:\n"
+ "udma_tchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n",
+ tx_chn->udma_tchan_id,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ xudma_tchanrt_read(chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = tx_chn->udma_tchan_id;
+ if (tx_chn->tx_pause_on_err)
+ req.tx_pause_on_err = 1;
+ if (tx_chn->tx_filt_einfo)
+ req.tx_filt_einfo = 1;
+ if (tx_chn->tx_filt_pswords)
+ req.tx_filt_pswords = 1;
+ req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ if (tx_chn->tx_supr_tdpkt)
+ req.tx_supr_tdpkt = 1;
+ req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
+ req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+ return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+ tx_chn->common.psdata_size,
+ tx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP TX channel */
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ if (IS_ERR(tx_chn->udma_tchanx)) {
+ ret = PTR_ERR(tx_chn->udma_tchanx);
+ dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+ goto err;
+ }
+ tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+ atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+ /* request and cfg rings */
+ tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, 0);
+ if (!tx_chn->ringtx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TX ring %u\n",
+ tx_chn->udma_tchan_id);
+ goto err;
+ }
+
+ tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ -1, 0);
+ if (!tx_chn->ringtxcq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TXCQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ /* request and cfg psi-l */
+ tx_chn->common.src_thread =
+ xudma_dev_get_psil_base(tx_chn->common.udmax) +
+ tx_chn->udma_tchan_id;
+
+ ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg tchan %d\n", ret);
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->psil_paired = true;
+
+ /* reset TX RT registers */
+ k3_udma_glue_disable_tx_chn(tx_chn);
+
+ k3_udma_glue_dump_tx_chn(tx_chn);
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+
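+/*
+ * Hypothetical usage sketch (the "tx0" name and the surrounding error
+ * handling are illustrative only): a client driver requests the channel
+ * by its "dma-names" entry and then enables it:
+ *
+ *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
+ *	if (IS_ERR(tx_chn))
+ *		return PTR_ERR(tx_chn);
+ *	ret = k3_udma_glue_enable_tx_chn(tx_chn);
+ */
+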
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
+
+ if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+ xudma_tchan_put(tx_chn->common.udmax,
+ tx_chn->udma_tchanx);
+
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+ if (tx_chn->ringtx)
+ k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma)
+{
+ u32 ringtxcq_id;
+
+ if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+ return -ENOMEM;
+
+ ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+ return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma)
+{
+ int ret;
+
+ ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+ if (!ret)
+ atomic_inc(&tx_chn->free_pkts);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
+
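+/*
+ * Illustrative TX completion loop (hypothetical caller code;
+ * handle_tx_completion() is an invented helper): descriptors queued with
+ * k3_udma_glue_push_tx_chn() are reclaimed from the completion ring once
+ * the transfer is done, which also replenishes the free_pkts budget:
+ *
+ *	dma_addr_t desc_dma;
+ *
+ *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
+ *		handle_tx_completion(desc_dma);
+ */
+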
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ u32 txrt_ctl;
+
+ txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ txrt_ctl);
+
+ txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ txrt_ctl);
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+ dma_addr_t desc_dma;
+ int occ_tx, i, ret;
+
+ /* reset TXCQ as it is not input for udma - expected to be empty */
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+ /*
+ * The TXQ reset needs to be done in a special way as it is input for
+ * udma and its state is cached by udma, so:
+ * 1) save TXQ occ
+ * 2) clean up TXQ and call callback .cleanup() for each desc
+ * 3) reset TXQ in a special way
+ */
+ occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+ dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+ for (i = 0; i < occ_tx; i++) {
+ ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
+
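+/*
+ * The cleanup callback used by k3_udma_glue_reset_tx_chn() is
+ * caller-defined; a minimal sketch (all names hypothetical) would look up
+ * and free the driver-private descriptor behind desc_dma:
+ *
+ *	static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
+ *	{
+ *		struct my_drv *drv = data;
+ *
+ *		my_drv_free_desc(drv, desc_dma);
+ *	}
+ */
+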
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+ return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = rx_chn->udma_rchan_id;
+ req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
+ /*
+ * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
+ * and udmax impl, so just configure it to an invalid value.
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+ */
+ req.rxcq_qnum = 0xFFFF;
+ if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ /* Default flow + extra ones */
+ req.flowid_start = rx_chn->flow_id_base;
+ req.flowid_cnt = rx_chn->flow_num;
+ }
+ req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+ if (ret)
+ dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+ rx_chn->udma_rchan_id, ret);
+
+ return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ if (IS_ERR_OR_NULL(flow->udma_rflow))
+ return;
+
+ if (flow->ringrxfdq)
+ k3_ringacc_ring_free(flow->ringrxfdq);
+
+ if (flow->ringrx)
+ k3_ringacc_ring_free(flow->ringrx);
+
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ flow->udma_rflow = NULL;
+ rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ if (IS_ERR(flow->udma_rflow)) {
+ ret = PTR_ERR(flow->udma_rflow);
+ dev_err(dev, "UDMAX rflow get err %d\n", ret);
+ goto err;
+ }
+
+ if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ return -ENODEV;
+ }
+
+ /* request and cfg rings */
+ flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id, 0);
+ if (!flow->ringrx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RX ring\n");
+ goto err;
+ }
+
+ flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxfdq0_id, 0);
+ if (!flow->ringrxfdq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RXFDQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+ goto err;
+ }
+
+ if (rx_chn->remote) {
+ rx_ring_id = TI_SCI_RESOURCE_NULL;
+ rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+ } else {
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ if (rx_chn->common.epib)
+ req.rx_einfo_present = 1;
+ if (rx_chn->common.psdata_size)
+ req.rx_psinfo_present = 1;
+ if (flow_cfg->rx_error_handling)
+ req.rx_error_handling = 1;
+ req.rx_desc_type = 0;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_src_tag_hi_sel = 0;
+ req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+ req.rx_dest_tag_hi_sel = 0;
+ req.rx_dest_tag_lo_sel = 0;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+ ret);
+ goto err;
+ }
+
+ rx_chn->flows_ready++;
+ dev_dbg(dev, "flow%d config done. ready:%d\n",
+ flow->udma_rflow_id, rx_chn->flows_ready);
+
+ return 0;
+err:
+ k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+ return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "dump_rx_chn:\n"
+ "udma_rchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n"
+ "epib: %d\n"
+ "hdesc_size: %u\n"
+ "psdata_size: %u\n"
+ "swdata_size: %u\n"
+ "flow_id_base: %d\n"
+ "flow_num: %d\n",
+ chn->udma_rchan_id,
+ chn->common.src_thread,
+ chn->common.dst_thread,
+ chn->common.epib,
+ chn->common.hdesc_size,
+ chn->common.psdata_size,
+ chn->common.swdata_size,
+ chn->flow_id_base,
+ chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ xudma_rchanrt_read(chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ int ret;
+
+ /* default rflow */
+ if (cfg->flow_id_use_rxchan_id)
+ return 0;
+
+ /* not a GP rflow range */
+ if (rx_chn->flow_id_base != -1 &&
+ !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ return 0;
+
+ /* Allocate range of GP rflows */
+ ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+ if (ret < 0) {
+ dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+ rx_chn->flow_id_base, rx_chn->flow_num, ret);
+ return ret;
+ }
+ rx_chn->flow_id_base = ret;
+
+ return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0)
+ return ERR_PTR(-EINVAL);
+
+ if (cfg->flow_id_num != 1 &&
+ (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+ return ERR_PTR(-EINVAL);
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = false;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP RX channel */
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ if (IS_ERR(rx_chn->udma_rchanx)) {
+ ret = PTR_ERR(rx_chn->udma_rchanx);
+ dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+ goto err;
+ }
+ rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ /* request and cfg psi-l */
+ rx_chn->common.dst_thread =
+ xudma_dev_get_psil_base(rx_chn->common.udmax) +
+ rx_chn->udma_rchan_id;
+
+ ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg rchan %d\n", ret);
+ goto err;
+ }
+
+ /* init default RX flow only if flow_num == 1 */
+ if (cfg->def_flow_cfg) {
+ ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+ if (ret)
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->psil_paired = true;
+
+ /* reset RX RT registers */
+ k3_udma_glue_disable_rx_chn(rx_chn);
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The remote RX channel is under the control of a remote CPU core, so
+ * Linux can only request it and manipulate it via its dedicated RX flows
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ if (cfg->remote)
+ return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+ else
+ return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+ return;
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ k3_udma_glue_release_rx_flow(rx_chn, i);
+
+ if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ xudma_free_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+
+ if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+ xudma_rchan_put(rx_chn->common.udmax,
+ rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ flow = &rx_chn->flows[flow_idx];
+
+ return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ u32 rxrt_ctl;
+
+ if (rx_chn->remote)
+ return -EINVAL;
+
+ if (rx_chn->flows_ready < rx_chn->flow_num)
+ return -EINVAL;
+
+ rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+ rxrt_ctl);
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ 0);
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ if (rx_chn->remote)
+ return;
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+ struct device *dev = rx_chn->common.dev;
+ dma_addr_t desc_dma;
+ int occ_rx, i, ret;
+
+ /* reset RXCQ as it is not input for udma - expected to be empty */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+ dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+ if (flow->ringrx)
+ k3_ringacc_ring_reset(flow->ringrx);
+
+ /* Skip RX FDQ in case one FDQ is used for the set of flows */
+ if (skip_fdq)
+ return;
+
+ /*
+ * The RX FDQ reset needs to be done in a special way as it is input for
+ * udma and its state is cached by udma, so:
+ * 1) save RX FDQ occ
+ * 2) clean up RX FDQ and call callback .cleanup() for each desc
+ * 3) reset RX FDQ in a special way
+ */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+ dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+ for (i = 0; i < occ_rx; i++) {
+ ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+ if (ret) {
+ dev_err(dev, "RX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+ dma_addr_t desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ flow = &rx_chn->flows[flow_num];
+
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+ return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644
index 000000000000..0b8f3dd6b146
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
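+/*
+ * Note: this file has no includes of its own; it is intended to be built
+ * as part of k3-udma.c, which provides struct udma_dev and the static
+ * helpers (e.g. navss_psil_pair()) that are wrapped below.
+ */
+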
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+ struct device_node *udma_node = np;
+ struct platform_device *pdev;
+ struct udma_dev *ud;
+
+ if (property) {
+ udma_node = of_parse_phandle(np, property, 0);
+ if (!udma_node) {
+ pr_err("UDMA node is not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ pdev = of_find_device_by_node(udma_node);
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (np != udma_node)
+ of_node_put(udma_node);
+
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+ return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+ return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+ return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res) \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \
+{ \
+ return __udma_reserve_##res(ud, false, id); \
+} \
+EXPORT_SYMBOL(xudma_##res##_get); \
+ \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \
+{ \
+ clear_bit(p->id, ud->res##_map); \
+} \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
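+
+/*
+ * The macro above generates thin exported wrappers, e.g.
+ * XUDMA_GET_PUT_RESOURCE(tchan) expands to xudma_tchan_get() calling
+ * __udma_reserve_tchan() and xudma_tchan_put() clearing the channel's
+ * bit in ud->tchan_map.
+ */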
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+ return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+ __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res) \
+int xudma_##res##_get_id(struct udma_##res *p) \
+{ \
+ return p->id; \
+} \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res) \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \
+{ \
+ return udma_##res##rt_read(p, reg); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_read); \
+ \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
+{ \
+ udma_##res##rt_write(p, reg, val); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644
index 000000000000..ea79c2df28e0
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.c
@@ -0,0 +1,3432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+ u8 elsize; /* RPSTR0 */
+ u16 elcnt; /* RPSTR0 */
+ u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS 1024
+#define K3_UDMA_DEFAULT_RING_SIZE 16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE 0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
+
+#define UDMA_RFLOW_DSTTAG_NONE 0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
+
+struct udma_chan;
+
+enum udma_mmr {
+ MMR_GCFG = 0,
+ MMR_RCHANRT,
+ MMR_TCHANRT,
+ MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+ void __iomem *reg_rt;
+
+ int id;
+ struct k3_ring *t_ring; /* Transmit ring */
+ struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+ int id;
+ struct k3_ring *fd_ring; /* Free Descriptor ring */
+ struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+ void __iomem *reg_rt;
+
+ int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32 BIT(0)
+#define UDMA_FLAG_PDMA_BURST BIT(1)
+
+struct udma_match_data {
+ u32 psil_base;
+ bool enable_memcpy_support;
+ u32 flags;
+ u32 statictr_z_mask;
+ u32 rchan_oes_offset;
+
+ u8 tpl_levels;
+ u32 level_start_idx[];
+};
+
+struct udma_dev {
+ struct dma_device ddev;
+ struct device *dev;
+ void __iomem *mmrs[MMR_LAST];
+ const struct udma_match_data *match_data;
+
+ size_t desc_align; /* alignment to use for descriptors */
+
+ struct udma_tisci_rm tisci_rm;
+
+ struct k3_ringacc *ringacc;
+
+ struct work_struct purge_work;
+ struct list_head desc_to_purge;
+ spinlock_t lock;
+
+ int tchan_cnt;
+ int echan_cnt;
+ int rchan_cnt;
+ int rflow_cnt;
+ unsigned long *tchan_map;
+ unsigned long *rchan_map;
+ unsigned long *rflow_gp_map;
+ unsigned long *rflow_gp_map_allocated;
+ unsigned long *rflow_in_use;
+
+ struct udma_tchan *tchans;
+ struct udma_rchan *rchans;
+ struct udma_rflow *rflows;
+
+ struct udma_chan *channels;
+ u32 psil_base;
+};
+
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+ struct virt_dma_desc vd;
+
+ bool terminated;
+
+ enum dma_transfer_direction dir;
+
+ struct udma_static_tr static_tr;
+ u32 residue;
+
+ unsigned int sglen;
+ unsigned int desc_idx; /* Only used for cyclic in packet mode */
+ unsigned int tr_idx;
+
+ u32 metadata_size;
+ void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */
+
+ unsigned int hwdesc_count;
+ struct udma_hwdesc hwdesc[0];
+};
+
+enum udma_chan_state {
+ UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+ UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+ UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+ struct delayed_work work;
+ unsigned long jiffie;
+ u32 residue;
+};
+
+struct udma_chan_config {
+ bool pkt_mode; /* TR or packet */
+ bool needs_epib; /* EPIB is needed for the communication or not */
+ u32 psd_size; /* size of Protocol Specific Data */
+ u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+ u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+ bool notdpkt; /* Suppress sending TDC packet */
+ int remote_thread_id;
+ u32 src_thread;
+ u32 dst_thread;
+ enum psil_endpoint_type ep_type;
+ bool enable_acc32;
+ bool enable_burst;
+ enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+ enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+ struct virt_dma_chan vc;
+ struct dma_slave_config cfg;
+ struct udma_dev *ud;
+ struct udma_desc *desc;
+ struct udma_desc *terminated_desc;
+ struct udma_static_tr static_tr;
+ char *name;
+
+ struct udma_tchan *tchan;
+ struct udma_rchan *rchan;
+ struct udma_rflow *rflow;
+
+ bool psil_paired;
+
+ int irq_num_ring;
+ int irq_num_udma;
+
+ bool cyclic;
+ bool paused;
+
+ enum udma_chan_state state;
+ struct completion teardown_completed;
+
+ struct udma_tx_drain tx_drain;
+
+ u32 bcnt; /* number of bytes completed since the start of the channel */
+ u32 in_ring_cnt; /* number of descriptors in flight */
+
+ /* Channel configuration parameters */
+ struct udma_chan_config config;
+
+ /* dmapool for packet mode descriptors */
+ bool use_dma_pool;
+ struct dma_pool *hdesc_pool;
+
+ u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+ return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
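+/* Read-modify-write helper; the register is only written if bits change */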
+static inline void udma_update_bits(void __iomem *base, int reg,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(base + reg);
+ tmp = orig & ~mask;
+ tmp |= (val & mask);
+
+ if (tmp != orig)
+ writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+ if (!tchan)
+ return 0;
+ return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+ u32 val)
+{
+ if (!tchan)
+ return;
+ udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!tchan)
+ return;
+ udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+ if (!rchan)
+ return 0;
+ return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+ u32 val)
+{
+ if (!rchan)
+ return;
+ udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!rchan)
+ return;
+ udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
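+ /* destination PSI-L threads are flagged with K3_PSIL_DST_THREAD_ID_OFFSET */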
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+ memset(&uc->config, 0, sizeof(uc->config));
+ uc->config.remote_thread_id = -1;
+ uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+ struct device *dev = uc->ud->dev;
+ u32 offset;
+ int i;
+
+ if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "TCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_tchanrt_read(uc->tchan, offset));
+ }
+ }
+
+ if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "RCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_rchanrt_read(uc->rchan, offset));
+ }
+ }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+ int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+ dma_addr_t paddr)
+{
+ struct udma_desc *d = uc->terminated_desc;
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+
+ if (!d) {
+ d = uc->desc;
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+ }
+
+ return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+ if (uc->use_dma_pool) {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_pool_free(uc->hdesc_pool,
+ d->hwdesc[i].cppi5_desc_vaddr,
+ d->hwdesc[i].cppi5_desc_paddr);
+
+ d->hwdesc[i].cppi5_desc_vaddr = NULL;
+ }
+ } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+ struct udma_dev *ud = uc->ud;
+
+ dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ d->hwdesc[0].cppi5_desc_vaddr,
+ d->hwdesc[0].cppi5_desc_paddr);
+
+ d->hwdesc[0].cppi5_desc_vaddr = NULL;
+ }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+ struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_splice_tail_init(&ud->desc_to_purge, &head);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+
+ udma_free_hwdesc(uc, d);
+ list_del(&vd->node);
+ kfree(d);
+ }
+
+ /* If more to purge, schedule the work again */
+ if (!list_empty(&ud->desc_to_purge))
+ schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+ struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+ unsigned long flags;
+
+ if (uc->terminated_desc == d)
+ uc->terminated_desc = NULL;
+
+ if (uc->use_dma_pool) {
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return;
+ }
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_add_tail(&vd->node, &ud->desc_to_purge);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+ u32 trt_ctl = 0;
+ u32 rrt_ctl = 0;
+
+ if (uc->tchan)
+ trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ if (uc->rchan)
+ rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+ if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+ return true;
+
+ return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+ u32 val, pause_mask;
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ val = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_DEV:
+ val = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_MEM:
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+ break;
+ default:
+ return false;
+ }
+
+ if (val & pause_mask)
+ return true;
+
+ return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ if (uc->cyclic && uc->config.pkt_mode) {
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[idx].cppi5_desc_paddr,
+ d->hwdesc[idx].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ } else {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[i].cppi5_desc_paddr,
+ d->hwdesc[i].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
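+/*
+ * Hand one CPPI5 descriptor to the hardware: the descriptor's dma address
+ * is written into the channel's transmit (or free descriptor) ring after
+ * the descriptor memory has been made visible to the device.
+ */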
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ struct k3_ring *ring = NULL;
+ int ret = -EINVAL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->fd_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->t_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring) {
+ dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+ wmb(); /* Ensure that writes are not moved over this point */
+ udma_sync_for_device(uc, idx);
+ ret = k3_ringacc_ring_push(ring, &desc_addr);
+ uc->in_ring_cnt++;
+ }
+
+ return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+ struct k3_ring *ring = NULL;
+ int ret = -ENOENT;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->r_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->tc_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring && k3_ringacc_ring_get_occ(ring)) {
+ struct udma_desc *d = NULL;
+
+ ret = k3_ringacc_ring_pop(ring, addr);
+ if (ret)
+ return ret;
+
+ /* Teardown completion */
+ if (cppi5_desc_is_tdcm(*addr))
+ return ret;
+
+ d = udma_udma_desc_from_paddr(uc, *addr);
+
+ if (d)
+ dma_sync_single_for_cpu(uc->ud->dev, *addr,
+ d->hwdesc[0].cppi5_desc_size,
+ DMA_FROM_DEVICE);
+ rmb(); /* Ensure that reads are not moved before this point */
+
+ if (!ret)
+ uc->in_ring_cnt--;
+ }
+
+ return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+ struct k3_ring *ring1 = NULL;
+ struct k3_ring *ring2 = NULL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ if (uc->rchan) {
+ ring1 = uc->rflow->fd_ring;
+ ring2 = uc->rflow->r_ring;
+ }
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ if (uc->tchan) {
+ ring1 = uc->tchan->t_ring;
+ ring2 = uc->tchan->tc_ring;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (ring1)
+ k3_ringacc_ring_reset_dma(ring1,
+ k3_ringacc_ring_get_occ(ring1));
+ if (ring2)
+ k3_ringacc_ring_reset(ring2);
+
+ /* Make sure we are not leaking memory by a stalled descriptor */
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ uc->in_ring_cnt = 0;
+}
+
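+/*
+ * The RT counter registers decrement by the amount written to them, so
+ * writing back the value just read clears each counter.
+ */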
+static void udma_reset_counters(struct udma_chan *uc)
+{
+ u32 val;
+
+ if (uc->tchan) {
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ if (uc->rchan) {
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reset all counters */
+ udma_reset_counters(uc);
+
+ /* Hard reset: re-initialize the channel completely */
+ if (hard) {
+ struct udma_chan_config ucc_backup;
+ int ret;
+
+ memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+ uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+ /* restore the channel configuration */
+ memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+ ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting forced teardown after forced reset helps to recover
+ * the rchan.
+ */
+ if (uc->config.dir == DMA_DEV_TO_MEM)
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN |
+ UDMA_CHAN_RT_CTL_FTDOWN);
+ }
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+ struct udma_chan_config *ucc = &uc->config;
+
+ if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ int i;
+
+ /* Push all descriptors to ring for packet mode cyclic or RX */
+ for (i = 0; i < uc->desc->sglen; i++)
+ udma_push_to_ring(uc, i);
+ } else {
+ udma_push_to_ring(uc, 0);
+ }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+ /* Only PDMAs have staticTR */
+ if (uc->config.ep_type == PSIL_EP_NATIVE)
+ return false;
+
+ /* Check if the staticTR configuration has changed for TX */
+ if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+ return true;
+
+ return false;
+}
+
+static int udma_start(struct udma_chan *uc)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+ if (!vd) {
+ uc->desc = NULL;
+ return -ENOENT;
+ }
+
+ list_del(&vd->node);
+
+ uc->desc = to_udma_desc(&vd->tx);
+
+ /* Channel is already running and does not need reconfiguration */
+ if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+ udma_start_desc(uc);
+ goto out;
+ }
+
+ /* Make sure that we clear the teardown bit, if it is set */
+ udma_reset_chan(uc, false);
+
+ /* Push descriptors before we start the channel */
+ udma_start_desc(uc);
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+ const struct udma_match_data *match_data =
+ uc->ud->match_data;
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+ PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+ match_data->statictr_z_mask));
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ /* Enable remote */
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_tchanrt_write(uc->tchan,
+ UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ /* Enable remote */
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+ return 0;
+}
+
+static int udma_stop(struct udma_chan *uc)
+{
+ enum udma_chan_state old_state = uc->state;
+
+ uc->state = UDMA_CHAN_IS_TERMINATING;
+ reinit_completion(&uc->teardown_completed);
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_TEARDOWN);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_FLUSH);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ default:
+ uc->state = old_state;
+ complete_all(&uc->teardown_completed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
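+/*
+ * In packet mode cyclic transfers the completed host descriptor is
+ * recycled: it is reset to its original state and pushed back to the
+ * ring before advancing to the next period.
+ */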
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+ struct udma_desc *d = uc->desc;
+ struct cppi5_host_desc_t *h_desc;
+
+ h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+ cppi5_hdesc_reset_to_original(h_desc);
+ udma_push_to_ring(uc, d->desc_idx);
+ d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+ struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+ u32 peer_bcnt, bcnt;
+
+ /* Only TX towards PDMA is affected */
+ if (uc->config.ep_type == PSIL_EP_NATIVE ||
+ uc->config.dir != DMA_MEM_TO_DEV)
+ return true;
+
+ peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+ if (peer_bcnt < bcnt) {
+ uc->tx_drain.residue = bcnt - peer_bcnt;
+ uc->tx_drain.jiffie = jiffies;
+ return false;
+ }
+
+ return true;
+}
+
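+/*
+ * Delayed work to poll for the draining of a TX transfer towards a PDMA
+ * peer. The next poll time is estimated from the observed drain rate
+ * (residue delta over elapsed jiffies); if no progress is seen, the
+ * check is retried after one second.
+ */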
+static void udma_check_tx_completion(struct work_struct *work)
+{
+ struct udma_chan *uc = container_of(work, typeof(*uc),
+ tx_drain.work.work);
+ bool desc_done = true;
+ u32 residue_diff;
+ unsigned long jiffie_diff, delay;
+
+ if (uc->desc) {
+ residue_diff = uc->tx_drain.residue;
+ jiffie_diff = uc->tx_drain.jiffie;
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
+
+ if (!desc_done) {
+ jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /* Try to guess when we should check next time */
+ residue_diff /= jiffie_diff;
+ delay = uc->tx_drain.residue / residue_diff / 3;
+ if (jiffies_to_msecs(delay) < 5)
+ delay = 0;
+ } else {
+ /* No progress, check again in 1 second */
+ delay = HZ;
+ }
+
+ schedule_delayed_work(&uc->tx_drain.work, delay);
+ } else if (uc->desc) {
+ struct udma_desc *d = uc->desc;
+
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+}
+
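+/*
+ * Completion (ring) interrupt: pop the completed descriptor's address
+ * from the ring, handle teardown completion markers, then either
+ * complete the cookie, recycle the cyclic descriptor, or defer to the
+ * TX drain worker.
+ */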
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+ dma_addr_t paddr = 0;
+
+ if (udma_pop_from_ring(uc, &paddr) || !paddr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* Teardown completion message */
+ if (cppi5_desc_is_tdcm(paddr)) {
+ /* Compensate our internal pop/push counter */
+ uc->in_ring_cnt++;
+
+ complete_all(&uc->teardown_completed);
+
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ if (!uc->desc)
+ udma_start(uc);
+
+ goto out;
+ }
+
+ d = udma_udma_desc_from_paddr(uc, paddr);
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+ if (desc_paddr != paddr) {
+ dev_err(uc->ud->dev, "not matching descriptors!\n");
+ goto out;
+ }
+
+ if (uc->cyclic) {
+ /* push the descriptor back to the ring */
+ if (d == uc->desc) {
+ udma_cyclic_packet_elapsed(uc);
+ vchan_cyclic_callback(&d->vd);
+ }
+ } else {
+ bool desc_done = false;
+
+ if (d == uc->desc) {
+ desc_done = udma_is_desc_really_done(uc, d);
+
+ if (desc_done) {
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ } else {
+ schedule_delayed_work(&uc->tx_drain.work,
+ 0);
+ }
+ }
+
+ if (desc_done)
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+ d = uc->desc;
+ if (d) {
+ d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+ if (uc->cyclic) {
+ vchan_cyclic_callback(&d->vd);
+ } else {
+ /* TODO: figure out the real amount of data */
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can be requested
+ * only by explicit flow id number. If @from is set to -1 it will try to find
+ * the first free range. If @from is a positive value it will force allocation
+ * only of the specified range of flows.
+ *
+ * Returns the flow id on success.
+ * Returns -ENOMEM if a free range can't be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ int start, tmp_from;
+ DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+ tmp_from = from;
+ if (tmp_from < 0)
+ tmp_from = ud->rchan_cnt;
+ /* Default flows can't be allocated; they are accessible only by id */
+ if (tmp_from < ud->rchan_cnt)
+ return -EINVAL;
+
+ if (tmp_from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+ ud->rflow_cnt);
+
+ start = bitmap_find_next_zero_area(tmp,
+ ud->rflow_cnt,
+ tmp_from, cnt, 0);
+ if (start >= ud->rflow_cnt)
+ return -ENOMEM;
+
+ if (from >= 0 && start != from)
+ return -EEXIST;
+
+ bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+ return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ if (from < ud->rchan_cnt)
+ return -EINVAL;
+ if (from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+ return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+ /*
+ * An attempt to request an rflow by ID can be made for any rflow that
+ * is not in use, with the assumption that the caller knows what it is
+ * doing. TI-SCI FW will perform an additional permission check anyway,
+ * so it's safe.
+ */
+
+ if (id < 0 || id >= ud->rflow_cnt)
+ return ERR_PTR(-ENOENT);
+
+ if (test_bit(id, ud->rflow_in_use))
+ return ERR_PTR(-ENOENT);
+
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+
+ dev_dbg(ud->dev, "get rflow%d\n", id);
+ set_bit(id, ud->rflow_in_use);
+ return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+ if (!test_bit(rflow->id, ud->rflow_in_use)) {
+ dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+ return;
+ }
+
+ dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+ clear_bit(rflow->id, ud->rflow_in_use);
+}
+
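+/*
+ * Instantiates __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a
+ * specific channel when @id >= 0, otherwise search for a free channel
+ * starting from the first index of the requested throughput level.
+ */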
+#define UDMA_RESERVE_RESOURCE(res) \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id) \
+{ \
+ if (id >= 0) { \
+ if (test_bit(id, ud->res##_map)) { \
+ dev_err(ud->dev, "%s%d is in use\n", #res, id); \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } else { \
+ int start; \
+ \
+ if (tpl >= ud->match_data->tpl_levels) \
+ tpl = ud->match_data->tpl_levels - 1; \
+ \
+ start = ud->match_data->level_start_idx[tpl]; \
+ \
+ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
+ start); \
+ if (id == ud->res##_cnt) { \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } \
+ \
+ set_bit(id, ud->res##_map); \
+ return &ud->res##s[id]; \
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return 0;
+ }
+
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->rchan))
+ return PTR_ERR(uc->rchan);
+
+ return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ const struct udma_match_data *match_data = ud->match_data;
+ int chan_id, end;
+
+ if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+ dev_info(ud->dev, "chan%d: already have chan pair %d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ if (uc->tchan) {
+ dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return -EBUSY;
+ } else if (uc->rchan) {
+ dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return -EBUSY;
+ }
+
+ /* Can be optimized, but let's have it like this for now */
+ end = min(ud->tchan_cnt, ud->rchan_cnt);
+ /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+ chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+ for (; chan_id < end; chan_id++) {
+ if (!test_bit(chan_id, ud->tchan_map) &&
+ !test_bit(chan_id, ud->rchan_map))
+ break;
+ }
+
+ if (chan_id == end)
+ return -ENOENT;
+
+ set_bit(chan_id, ud->tchan_map);
+ set_bit(chan_id, ud->rchan_map);
+ uc->tchan = &ud->tchans[chan_id];
+ uc->rchan = &ud->rchans[chan_id];
+
+ return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (!uc->rchan) {
+ dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+ return -EINVAL;
+ }
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+ uc->id, uc->rflow->id);
+ return 0;
+ }
+
+ uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow))
+ return PTR_ERR(uc->rflow);
+
+ return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+ uc->rchan->id);
+ clear_bit(uc->rchan->id, ud->rchan_map);
+ uc->rchan = NULL;
+ }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+ uc->tchan->id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+ uc->rflow->id);
+ __udma_put_rflow(ud, uc->rflow);
+ uc->rflow = NULL;
+ }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+ if (!uc->tchan)
+ return;
+
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->t_ring = NULL;
+ uc->tchan->tc_ring = NULL;
+
+ udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = udma_get_tchan(uc);
+ if (ret)
+ return ret;
+
+ uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+ uc->tchan->id, 0);
+ if (!uc->tchan->t_ring) {
+ ret = -EBUSY;
+ goto err_tx_ring;
+ }
+
+ uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!uc->tchan->tc_ring) {
+ ret = -EBUSY;
+ goto err_txc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->tc_ring = NULL;
+err_txc_ring:
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ uc->tchan->t_ring = NULL;
+err_tx_ring:
+ udma_put_tchan(uc);
+
+ return ret;
+}
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+ if (!uc->rchan)
+ return;
+
+ if (uc->rflow) {
+ struct udma_rflow *rflow = uc->rflow;
+
+ k3_ringacc_ring_free(rflow->fd_ring);
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->fd_ring = NULL;
+ rflow->r_ring = NULL;
+
+ udma_put_rflow(uc);
+ }
+
+ udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct k3_ring_cfg ring_cfg;
+ struct udma_rflow *rflow;
+ int fd_ring_id;
+ int ret;
+
+ ret = udma_get_rchan(uc);
+ if (ret)
+ return ret;
+
+ /* For MEM_TO_MEM we don't need rflow or rings */
+ if (uc->config.dir == DMA_MEM_TO_MEM)
+ return 0;
+
+ ret = udma_get_rflow(uc, uc->rchan->id);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_rflow;
+ }
+
+ rflow = uc->rflow;
+ fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+ if (!rflow->fd_ring) {
+ ret = -EBUSY;
+ goto err_rx_ring;
+ }
+
+ rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!rflow->r_ring) {
+ ret = -EBUSY;
+ goto err_rxc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->r_ring = NULL;
+err_rxc_ring:
+ k3_ringacc_ring_free(rflow->fd_ring);
+ rflow->fd_ring = NULL;
+err_rx_ring:
+ udma_put_rflow(uc);
+err_rflow:
+ udma_put_rchan(uc);
+
+ return ret;
+}
+
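+/*
+ * Bitmasks telling TI-SCI which fields of the UDMAP channel
+ * configuration messages below carry valid data.
+ */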
+#define TISCI_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct udma_rchan *rchan = uc->rchan;
+ int ret = 0;
+
+ /* Non-synchronized - mem-to-mem type of transfer */
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret) {
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+ return ret;
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_rx.rxcq_qnum = tc_ring;
+ req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = mode;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ req_tx.tx_fetch_size = fetch_size >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+ int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = fetch_size >> 2;
+ req_rx.rxcq_qnum = rx_ring;
+ req_rx.rx_chan_type = mode;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = rchan->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+ flow_req.rx_dest_qnum = rx_ring;
+ flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+ flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+ flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+ flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+ flow_req.rx_fdq0_sz0_qnum = fd_ring;
+ flow_req.rx_fdq1_qnum = fd_ring;
+ flow_req.rx_fdq2_qnum = fd_ring;
+ flow_req.rx_fdq3_qnum = fd_ring;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_match_data *match_data = ud->match_data;
+ struct k3_ring *irq_ring;
+ u32 irq_udma_idx;
+ int ret;
+
+ if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->use_dma_pool = true;
+ /* in case of MEM_TO_MEM we have a maximum of two TRs */
+ if (uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+ uc->config.pkt_mode = false;
+ }
+ }
+
+ if (uc->use_dma_pool) {
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non-synchronized - mem-to-mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_get_chan_pair(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ udma_free_tx_resources(uc);
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->rflow->r_ring;
+ irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+ ret = udma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ goto err_res_free;
+ }
+ }
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ k3_ringacc_get_ring_id(irq_ring));
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from UDMA (TR events) only needed for slave TR mode channels */
+ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+ return 0;
+}
+
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+ size_t tr_size, int tr_count,
+ enum dma_transfer_direction dir)
+{
+ struct udma_hwdesc *hwdesc;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct udma_desc *d;
+ u32 reload_count = 0;
+ u32 ring_id;
+
+ switch (tr_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+ return NULL;
+ }
+
+ /* We have only one descriptor containing multiple TRs */
+ d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = tr_count;
+
+ d->hwdesc_count = 1;
+ hwdesc = &d->hwdesc[0];
+
+ /* Allocate memory for DMA ring descriptor */
+ if (uc->use_dma_pool) {
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ } else {
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+ tr_count);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ uc->ud->desc_align);
+ hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+ hwdesc->cppi5_desc_size,
+ &hwdesc->cppi5_desc_paddr,
+ GFP_NOWAIT);
+ }
+
+ if (!hwdesc->cppi5_desc_vaddr) {
+ kfree(d);
+ return NULL;
+ }
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+
+ if (uc->cyclic)
+ reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+ cppi5_desc_set_pktids(tr_desc, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req = NULL;
+ unsigned int i;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and set up the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
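+ /*
+ * One type1 TR per SG entry: icnt0 is the amount moved per inner
+ * loop (one burst) and icnt1 the number of bursts. For example, a
+ * 4-byte bus width with maxburst of 8 gives icnt0 = 32, so a 1024
+ * byte segment is described as icnt1 = 32 bursts.
+ */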
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->residue += sg_dma_len(sgent);
+
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[i].addr = sg_dma_address(sgent);
+ tr_req[i].icnt0 = burst * dev_width;
+ tr_req[i].dim1 = burst * dev_width;
+ tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ }
+
+ cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
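+/*
+ * PDMA peers do not fetch TRs, so the transfer geometry (element size
+ * derived from the slave bus width, element count and burst count) must
+ * be programmed into the peer's static TR registers instead.
+ */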
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+ enum dma_slave_buswidth dev_width,
+ u16 elcnt)
+{
+ if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+ return 0;
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ d->static_tr.elsize = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ d->static_tr.elsize = 1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ d->static_tr.elsize = 2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ d->static_tr.elsize = 3;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ d->static_tr.elsize = 4;
+ break;
+ default: /* not reached */
+ return -EINVAL;
+ }
+
+ d->static_tr.elcnt = elcnt;
+
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * In TR mode, when the channel is not cyclic, we also need PDMA to
+ * close the packet, otherwise the transfer will stall because PDMA
+ * holds on to the data it has received from the peripheral.
+ */
+ if (uc->config.pkt_mode || !uc->cyclic) {
+ unsigned int div = dev_width * elcnt;
+
+ if (uc->cyclic)
+ d->static_tr.bstcnt = d->residue / d->sglen / div;
+ else
+ d->static_tr.bstcnt = d->residue / div;
+
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+ } else {
+ d->static_tr.bstcnt = 0;
+ }
+
+ return 0;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_host_desc_t *h_desc = NULL;
+ struct udma_desc *d;
+ u32 ring_id;
+ unsigned int i;
+
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+ d->hwdesc_count = sglen;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for_each_sg(sgl, sgent, sglen, i) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+ struct cppi5_host_desc_t *desc;
+ size_t sg_len = sg_dma_len(sgent);
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ d->residue += sg_len;
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ desc = hwdesc->cppi5_desc_vaddr;
+
+ if (i == 0) {
+ cppi5_hdesc_init(desc, 0, 0);
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+ } else {
+ cppi5_hdesc_reset_hbdesc(desc);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+ }
+
+ /* attach the sg buffer to the descriptor */
+ cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+ /* Attach link as host buffer descriptor */
+ if (h_desc)
+ cppi5_hdesc_link_hbdesc(h_desc,
+ hwdesc->cppi5_desc_paddr);
+
+ if (dir == DMA_MEM_TO_DEV)
+ h_desc = desc;
+ }
+
+ if (d->residue >= SZ_4M) {
+ dev_err(uc->ud->dev,
+ "%s: Transfer size %u is over the supported 4M range\n",
+ __func__, d->residue);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+ return d;
+}
+
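+/*
+ * Metadata ops for the dmaengine per-descriptor metadata API: attach()
+ * copies client provided metadata into the EPIB/psdata area of the
+ * first host descriptor, while get_ptr()/set_len() expose that area
+ * for in-place access.
+ */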
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (!data || len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ if (d->dir == DMA_MEM_TO_DEV)
+ memcpy(h_desc->epib, data, len);
+
+ if (uc->config.needs_epib)
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ d->metadata = data;
+ d->metadata_size = len;
+ if (uc->config.needs_epib)
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return ERR_PTR(-ENOTSUPP);
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ *max_len = uc->config.metadata_size;
+
+ *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+ CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+ *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+ return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = payload_len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (payload_len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ if (uc->config.needs_epib) {
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+ }
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static struct dma_descriptor_metadata_ops metadata_ops = {
+ .attach = udma_attach_metadata,
+ .get_ptr = udma_get_metadata_ptr,
+ .set_len = udma_set_metadata_len,
+};
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+ context);
+ else
+ d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+ context);
+
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req;
+ unsigned int i;
+ unsigned int periods = buf_len / period_len;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and set up the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ if (!d)
+ return NULL;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for (i = 0; i < periods; i++) {
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[i].addr = buf_addr + period_len * i;
+ tr_req[i].icnt0 = dev_width;
+ tr_req[i].icnt1 = period_len / dev_width;
+ tr_req[i].dim1 = dev_width;
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ cppi5_tr_csf_set(&tr_req[i].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ }
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct udma_desc *d;
+ u32 ring_id;
+ int i;
+ int periods = buf_len / period_len;
+
+ if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+ return NULL;
+
+ if (period_len >= SZ_4M)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->hwdesc_count = periods;
+
+ /* TODO: re-check this... */
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for (i = 0; i < periods; i++) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t period_addr = buf_addr + (period_len * i);
+ struct cppi5_host_desc_t *h_desc;
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ h_desc = hwdesc->cppi5_desc_vaddr;
+
+ cppi5_hdesc_init(h_desc, 0, 0);
+ cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+ /* attach each period to a new descriptor */
+ cppi5_hdesc_attach_buf(h_desc,
+ period_addr, period_len,
+ period_addr, period_len);
+ }
+
+ return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ uc->cyclic = true;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+ else
+ d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+
+ if (!d)
+ return NULL;
+
+ d->sglen = buf_len / period_len;
+
+ d->dir = dir;
+ d->residue = buf_len;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_desc *d;
+ struct cppi5_tr_type15_t *tr_req;
+ int num_tr;
+ size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+ if (uc->config.dir != DMA_MEM_TO_MEM) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+ return NULL;
+ }
+
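+ /*
+ * icnt0 is a 16 bit count, so a single TR can move at most
+ * SZ_64K - 1 bytes per inner loop. Larger copies are split into
+ * tr0 moving equal sized aligned blocks and tr1 moving the
+ * remainder. For example, len = 200000 with 8 byte alignment gives
+ * tr0_cnt0 = 65528, tr0_cnt1 = 3 and tr1_cnt0 = 3416
+ * (65528 * 3 + 3416 = 200000).
+ */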
+ if (len < SZ_64K) {
+ num_tr = 1;
+ tr0_cnt0 = len;
+ tr0_cnt1 = 1;
+ } else {
+ unsigned long align_to = __ffs(src | dest);
+
+ if (align_to > 3)
+ align_to = 3;
+ /*
+ * Keep it simple: tr0 moves (SZ_64K - BIT(align_to)) byte blocks,
+ * tr1 moves the remainder.
+ */
+ num_tr = 2;
+ tr0_cnt0 = (SZ_64K - BIT(align_to));
+ if (len / tr0_cnt0 >= SZ_64K) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
+ }
+
+ tr0_cnt1 = len / tr0_cnt0;
+ tr1_cnt0 = len % tr0_cnt0;
+ }
+
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+ if (!d)
+ return NULL;
+
+ d->dir = DMA_MEM_TO_MEM;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+ d->residue = len;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+
+ cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[0].addr = src;
+ tr_req[0].icnt0 = tr0_cnt0;
+ tr_req[0].icnt1 = tr0_cnt1;
+ tr_req[0].icnt2 = 1;
+ tr_req[0].icnt3 = 1;
+ tr_req[0].dim1 = tr0_cnt0;
+
+ tr_req[0].daddr = dest;
+ tr_req[0].dicnt0 = tr0_cnt0;
+ tr_req[0].dicnt1 = tr0_cnt1;
+ tr_req[0].dicnt2 = 1;
+ tr_req[0].dicnt3 = 1;
+ tr_req[0].ddim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].icnt0 = tr1_cnt0;
+ tr_req[1].icnt1 = 1;
+ tr_req[1].icnt2 = 1;
+ tr_req[1].icnt3 = 1;
+
+ tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].dicnt0 = tr1_cnt0;
+ tr_req[1].dicnt1 = 1;
+ tr_req[1].dicnt2 = 1;
+ tr_req[1].dicnt3 = 1;
+ }
+
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* If we have something pending and no active descriptor, try to start */
+ if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+ /*
+ * start a descriptor if the channel is NOT [marked as
+ * terminating _and_ it is still running (teardown has not
+ * completed yet)].
+ */
+ if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+ udma_is_chan_running(uc)))
+ udma_start(uc);
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+ ret = DMA_PAUSED;
+
+ if (ret == DMA_COMPLETE || !txstate)
+ goto out;
+
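+ /*
+ * Residue is derived from the hardware byte counters: bcnt counts
+ * bytes the channel has moved since the last counter reset,
+ * uc->bcnt holds the bytes already accounted to completed
+ * descriptors, and "delay" is the amount still in flight between
+ * UDMA and the remote peer.
+ */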
+ if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+ u32 peer_bcnt = 0;
+ u32 bcnt = 0;
+ u32 residue = uc->desc->residue;
+ u32 delay = 0;
+
+ if (uc->desc->dir == DMA_MEM_TO_DEV) {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_SBCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+ if (bcnt > peer_bcnt)
+ delay = bcnt - peer_bcnt;
+ }
+ } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_BCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+ if (peer_bcnt > bcnt)
+ delay = peer_bcnt - bcnt;
+ }
+ } else {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_BCNT_REG);
+ }
+
+ bcnt -= uc->bcnt;
+ if (bcnt && !(bcnt % uc->desc->residue))
+ residue = 0;
+ else
+ residue -= bcnt % uc->desc->residue;
+
+ if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+ ret = DMA_COMPLETE;
+ delay = 0;
+ }
+
+ dma_set_residue(txstate, residue);
+ dma_set_in_flight_bytes(txstate, delay);
+
+ } else {
+ ret = DMA_COMPLETE;
+ }
+
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ return ret;
+}
+
+static int udma_pause(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* pause the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE,
+ UDMA_CHAN_RT_CTL_PAUSE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* resume the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ if (udma_is_chan_running(uc))
+ udma_stop(uc);
+
+ if (uc->desc) {
+ uc->terminated_desc = uc->desc;
+ uc->desc = NULL;
+ uc->terminated_desc->terminated = true;
+ cancel_delayed_work(&uc->tx_drain.work);
+ }
+
+ uc->paused = false;
+
+ vchan_get_all_descriptors(&uc->vc, &head);
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ vchan_dma_desc_free_list(&uc->vc, &head);
+
+ return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ vchan_synchronize(&uc->vc);
+
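+ /*
+ * If the channel is still tearing down after a terminate, give the
+ * hardware a chance to complete before forcing a reset.
+ */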
+ if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+ timeout = wait_for_completion_timeout(&uc->teardown_completed,
+ timeout);
+ if (!timeout) {
+ dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+ uc->id);
+ udma_dump_chan_stdata(uc);
+ udma_reset_chan(uc, true);
+ }
+ }
+
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc))
+ dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ udma_reset_rings(uc);
+}
+
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd,
+ struct dmaengine_result *result)
+{
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
+
+ if (!vd)
+ return;
+
+ d = to_udma_desc(&vd->tx);
+
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+ /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
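+ /* A non-zero residue means the transfer was cut short */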
+ if (result->residue)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+}
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
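+ /* A cyclic descriptor is not freed; only its callback is invoked */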
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ udma_desc_pre_callback(vc, vd, NULL);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct dmaengine_result result;
+
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+
+ udma_desc_pre_callback(vc, vd, &result);
+ dmaengine_desc_callback_invoke(&cb, &result);
+
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+
+ udma_terminate_all(chan);
+ if (uc->terminated_desc) {
+ udma_reset_chan(uc, false);
+ udma_reset_rings(uc);
+ }
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
+ if (uc->irq_num_ring > 0) {
+ free_irq(uc->irq_num_ring, uc);
+
+ uc->irq_num_ring = 0;
+ }
+ if (uc->irq_num_udma > 0) {
+ free_irq(uc->irq_num_udma, uc);
+
+ uc->irq_num_udma = 0;
+ }
+
+ /* Release PSI-L pairing */
+ if (uc->psil_paired) {
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+ }
+
+ vchan_free_chan_resources(&uc->vc);
+ tasklet_kill(&uc->vc.task);
+
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+}
+
+static struct platform_driver udma_driver;
+
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct udma_chan_config *ucc;
+ struct psil_endpoint_config *ep_config;
+ struct udma_chan *uc;
+ struct udma_dev *ud;
+ u32 *args;
+
+ if (chan->device->dev->driver != &udma_driver.driver)
+ return false;
+
+ uc = to_udma_chan(chan);
+ ucc = &uc->config;
+ ud = uc->ud;
+ args = param;
+
+ ucc->remote_thread_id = args[0];
+
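+ /*
+ * PSI-L destination threads are flagged by the
+ * K3_PSIL_DST_THREAD_ID_OFFSET bit, which tells us the direction.
+ */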
+ if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ ucc->dir = DMA_MEM_TO_DEV;
+ else
+ ucc->dir = DMA_DEV_TO_MEM;
+
+ ep_config = psil_get_ep_config(ucc->remote_thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ return false;
+ }
+
+ ucc->pkt_mode = ep_config->pkt_mode;
+ ucc->channel_tpl = ep_config->channel_tpl;
+ ucc->notdpkt = ep_config->notdpkt;
+ ucc->ep_type = ep_config->ep_type;
+
+ if (ucc->ep_type != PSIL_EP_NATIVE) {
+ const struct udma_match_data *match_data = ud->match_data;
+
+ if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+ ucc->enable_acc32 = ep_config->pdma_acc32;
+ if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+ ucc->enable_burst = ep_config->pdma_burst;
+ }
+
+ ucc->needs_epib = ep_config->needs_epib;
+ ucc->psd_size = ep_config->psd_size;
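+ /* Metadata is the optional EPIB plus the endpoint's PS data */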
+ ucc->metadata_size =
+ (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+ ucc->psd_size;
+
+ if (ucc->pkt_mode)
+ ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ ucc->metadata_size, ud->desc_align);
+
+ dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+ ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+ return true;
+}
+
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct udma_dev *ud = ofdma->of_dma_data;
+ dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct dma_chan *chan;
+
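+ /* The DT binding uses a single cell: the remote PSI-L thread ID */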
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+ &dma_spec->args[0], ofdma->of_node);
+ if (!chan) {
+ dev_err(ud->dev, "get channel fail in %s.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return chan;
+}
+
+static struct udma_match_data am654_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 8, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data am654_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true, /* TEST: DMA domains */
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 3,
+ .level_start_idx = {
+ [0] = 16, /* Normal channels */
+ [1] = 4, /* High Throughput channels */
+ [2] = 0, /* Ultra High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static const struct of_device_id udma_of_match[] = {
+ {
+ .compatible = "ti,am654-navss-main-udmap",
+ .data = &am654_main_data,
+ }, {
+ .compatible = "ti,am654-navss-mcu-udmap",
+ .data = &am654_mcu_data,
+ }, {
+ .compatible = "ti,j721e-navss-main-udmap",
+ .data = &j721e_main_data,
+ }, {
+ .compatible = "ti,j721e-navss-mcu-udmap",
+ .data = &j721e_mcu_data,
+ },
+ { /* Sentinel */ },
+};
+
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < MMR_LAST; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mmr_names[i]);
+ ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ud->mmrs[i]))
+ return PTR_ERR(ud->mmrs[i]);
+ }
+
+ return 0;
+}
+
+static int udma_setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret, i, j;
+ u32 cap2, cap3;
+ struct ti_sci_resource_desc *rm_desc;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+ "ti,sci-rm-range-rchan",
+ "ti,sci-rm-range-rflow" };
+
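+ /* Decode the channel and flow counts from the capability registers */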
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ ud->rflow_cnt = cap3 & 0x3fff;
+ ud->tchan_cnt = cap2 & 0x1ff;
+ ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+ ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+ ch_count = ud->tchan_cnt + ud->rchan_cnt;
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+ BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+ !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+ !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /*
+ * RX flows with the same IDs as RX channels are reserved to be used
+ * as default flows if the remote HW can't generate flow_ids. Those
+ * RX flows can be requested only explicitly by ID.
+ */
+ bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+ /* by default no GP rflows are assigned to Linux */
+ bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++)
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->tchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* rchan and matching default flow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ irq_res.sets += rm_res->sets;
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->match_data->rchan_oes_offset;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ /* GP rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all gp flows are assigned exclusively to Linux */
+ bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+ ud->rflow_cnt - ud->rchan_cnt);
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+ ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+ if (!ch_count)
+ return -ENODEV;
+
+ ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+ GFP_KERNEL);
+ if (!ud->channels)
+ return -ENOMEM;
+
+ dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+
+ return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int udma_probe(struct platform_device *pdev)
+{
+ struct device_node *navss_node = pdev->dev.parent->of_node;
+ struct device *dev = &pdev->dev;
+ struct udma_dev *ud;
+ const struct of_device_id *match;
+ int i, ret;
+ int ch_count;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret)
+ dev_err(dev, "failed to set dma mask stuff\n");
+
+ ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+ if (!ud)
+ return -ENOMEM;
+
+ ret = udma_get_mmrs(pdev, ud);
+ if (ret)
+ return ret;
+
+ ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+ if (IS_ERR(ud->tisci_rm.tisci))
+ return PTR_ERR(ud->tisci_rm.tisci);
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+ pdev->id = ud->tisci_rm.tisci_dev_id;
+
+ ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_navss_dev_id);
+ if (ret) {
+ dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+
+ ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+ ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (IS_ERR(ud->ringacc))
+ return PTR_ERR(ud->ringacc);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ ud->match_data = match->data;
+
+ dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+ ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+ ud->ddev.device_config = udma_slave_config;
+ ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ ud->ddev.device_issue_pending = udma_issue_pending;
+ ud->ddev.device_tx_status = udma_tx_status;
+ ud->ddev.device_pause = udma_pause;
+ ud->ddev.device_resume = udma_resume;
+ ud->ddev.device_terminate_all = udma_terminate_all;
+ ud->ddev.device_synchronize = udma_synchronize;
+
+ ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+ ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+ DESC_METADATA_ENGINE;
+ if (ud->match_data->enable_memcpy_support) {
+ dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+ ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+ }
+
+ ud->ddev.dev = dev;
+ ud->dev = dev;
+ ud->psil_base = ud->match_data->psil_base;
+
+ INIT_LIST_HEAD(&ud->ddev.channels);
+ INIT_LIST_HEAD(&ud->desc_to_purge);
+
+ ch_count = udma_setup_resources(ud);
+ if (ch_count <= 0)
+ return ch_count;
+
+ spin_lock_init(&ud->lock);
+ INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
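+ /*
+ * Descriptors must be aligned to at least 64 bytes, or to a cache
+ * line if that is larger.
+ */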
+ ud->desc_align = 64;
+ if (ud->desc_align < dma_get_cache_alignment())
+ ud->desc_align = dma_get_cache_alignment();
+
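+ /* Each channel's realtime registers live in their own 4K window */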
+ for (i = 0; i < ud->tchan_cnt; i++) {
+ struct udma_tchan *tchan = &ud->tchans[i];
+
+ tchan->id = i;
+ tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rchan_cnt; i++) {
+ struct udma_rchan *rchan = &ud->rchans[i];
+
+ rchan->id = i;
+ rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rflow_cnt; i++) {
+ struct udma_rflow *rflow = &ud->rflows[i];
+
+ rflow->id = i;
+ }
+
+ for (i = 0; i < ch_count; i++) {
+ struct udma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->vc.desc_free = udma_desc_free;
+ uc->id = i;
+ uc->tchan = NULL;
+ uc->rchan = NULL;
+ uc->config.remote_thread_id = -1;
+ uc->config.dir = DMA_MEM_TO_MEM;
+ uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+ dev_name(dev), i);
+
+ vchan_init(&uc->vc, &ud->ddev);
+ /* Use custom vchan completion handling */
+ tasklet_init(&uc->vc.task, udma_vchan_complete,
+ (unsigned long)&uc->vc);
+ init_completion(&uc->teardown_completed);
+ }
+
+ ret = dma_async_device_register(&ud->ddev);
+ if (ret) {
+ dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ud);
+
+ ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+ if (ret) {
+ dev_err(dev, "failed to register of_dma controller\n");
+ dma_async_device_unregister(&ud->ddev);
+ }
+
+ return ret;
+}
+
+static struct platform_driver udma_driver = {
+ .driver = {
+ .name = "ti-udma",
+ .of_match_table = udma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
new file mode 100644
index 000000000000..128d8744a435
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG 0x0
+#define UDMA_PERF_CTL_REG 0x4
+#define UDMA_EMU_CTL_REG 0x8
+#define UDMA_PSIL_TO_REG 0x10
+#define UDMA_UTC_CTL_REG 0x1c
+#define UDMA_CAP_REG(i) (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG 0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_TCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG \
+ UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG \
+ UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG 0x400
+#define UDMA_TCHAN_RT_BCNT_REG 0x408
+#define UDMA_TCHAN_RT_SBCNT_REG 0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG 0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_RCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG \
+ UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG \
+ UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG 0x400
+#define UDMA_RCHAN_RT_BCNT_REG 0x408
+#define UDMA_RCHAN_RT_SBCNT_REG 0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH BIT(28)
+#define UDMA_PEER_RT_EN_IDLE BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT (24)
+#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT (0)
+
+#define PDMA_STATIC_TR_Y(x) \
+ (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x) \
+ (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32 BIT(30)
+#define PDMA_STATIC_TR_XY_BURST BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+ RM_RANGE_TCHAN = 0,
+ RM_RANGE_RCHAN,
+ RM_RANGE_RFLOW,
+ RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+ u32 tisci_dev_id;
+
+ /* tisci information for PSI-L thread pairing/unpairing */
+ const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+ u32 tisci_navss_dev_id;
+
+ struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 256fc662c500..23e33a85f033 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
struct virt_dma_desc *vd, *_vd;
list_for_each_entry_safe(vd, _vd, head, node) {
- if (dmaengine_desc_test_reuse(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -134,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index ab158bac03a7..e9f5250fbe4d 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -31,9 +31,9 @@ struct virt_dma_chan {
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
+ struct list_head desc_terminated;
struct virt_dma_desc *cyclic;
- struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- if (dmaengine_desc_test_reuse(&vd->tx))
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
list_add(&vd->node, &vc->desc_allocated);
- else
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
vc->desc_free(vd);
+ }
}
/**
@@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- /* free up stuck descriptor */
- if (vc->vd_terminated)
- vchan_vdesc_fini(vc->vd_terminated);
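+ /* Park the descriptor; it is freed later, e.g. from vchan_synchronize() */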
+ list_add_tail(&vd->node, &vc->desc_terminated);
- vc->vd_terminated = vd;
if (vc->cyclic == vd)
vc->cyclic = NULL;
}
@@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
}
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
@@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ LIST_HEAD(head);
unsigned long flags;
tasklet_kill(&vc->task);
spin_lock_irqsave(&vc->lock, flags);
- if (vc->vd_terminated) {
- vchan_vdesc_fini(vc->vd_terminated);
- vc->vd_terminated = NULL;
- }
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
}
#endif
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9c845c07b107..d47749a35863 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -123,10 +123,12 @@
/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
- u32 val;
+ u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
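+ /* The ARLEN/AWLEN fields encode the burst length as log2(len) */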
+ burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst;
- chan->dst_burst_len = config->dst_maxburst;
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0;
}
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5c8272329a65..b3c99bb5fe77 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -491,8 +491,7 @@ config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
help
- Support for error detection and correction on the
- TI SoCs.
+ Support for error detection and correction on TI SoCs.
config EDAC_QCOM
tristate "QCOM EDAC Controller"
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 428ce98f6776..9fbad908a854 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17 || pvt->fam == 0x18) {
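+ /* pvt->umc is only allocated on families with UMCs, i.e. 17h+ */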
+ if (pvt->umc) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- switch (pvt->fam) {
- case 0x15:
- /* Erratum #505 */
- if (pvt->model < 0x10)
- f15h_select_dct(pvt, 0);
-
- if (pvt->model == 0x60)
- amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- break;
-
- case 0x17:
- case 0x18:
+ if (pvt->umc) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
} else {
scrubval = 0;
}
- break;
+ } else if (pvt->fam == 0x15) {
+ /* Erratum #505 */
+ if (pvt->model < 0x10)
+ f15h_select_dct(pvt, 0);
- default:
+ if (pvt->model == 0x60)
+ amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ } else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
- break;
}
scrubval = scrubval & 0x001F;
@@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
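+ /* UMC dimm_cfg bits: 5 = LRDDR4, 4 = RDDR4, otherwise plain DDR4 */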
+ if (pvt->umc) {
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+ }
+
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
@@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
- case 0x17:
- case 0x18:
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
-
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
@@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
+ [F19_CPUS] = {
+ .ctl_name = "F19h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+ .max_mcs = 8,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
};
/*
@@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
family_types[F17_CPUS].ctl_name = "F18h";
break;
+ case 0x19:
+ fam_type = &family_types[F19_CPUS];
+ pvt->ops = &family_types[F19_CPUS].ops;
+ family_types[F19_CPUS].ctl_name = "F19h";
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&F3->dev);
- WARN_ON(!mci);
-
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
@@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9be31688110b..abbf3c274d74 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -122,6 +122,8 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
/*
* Function 1 - Address Map
@@ -292,6 +294,7 @@ enum amd_families {
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M70H_CPUS,
+ F19_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 09a9e3de9595..b194658b8b5c 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (!np) {
dev_err(mci->pdev, "dt: missing /memory node\n");
return -ENODEV;
- };
+ }
rc = of_address_to_resource(np, 0, &r);
@@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (rc) {
dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
return rc;
- };
+ }
dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
r.start, resource_size(&r), PAGE_SHIFT);
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index f564a4a8a4ae..5c1eea96230c 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
- window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+ window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
mchbar);
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 432b375a4075..a8988db6d423 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 0ddc41e47a96..191aa7c19ded 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a)
return a & ((1 << 16) - 1);
}
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
- return a & ((1 << 18) - 1);
-}
-
static inline u32 i5100_recmema_merr(u32 a)
{
return i5100_nrecmema_merr(a);
@@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 dw;
u32 dw2;
unsigned syndrome = 0;
- unsigned ecc_loc = 0;
unsigned merr;
unsigned bank;
unsigned rank;
@@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
syndrome = dw2;
pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
- ecc_loc = i5100_redmemb_ecc_locator(dw2);
}
if (i5100_validlog_recmemvalid(dw)) {
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 7c6a2d4d2360..6be99e0d850d 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
- mch_window = ioremap_nocache(mchbar, 0x1000);
+ mch_window = ioremap(mchbar, 0x1000);
if (!mch_window) {
edac_dbg(3, "error ioremapping MCHBAR!\n");
goto fail0;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4f65073f230b..d68346a8e141 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ea622c6f3a39..ea980c556f2e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -6,7 +6,7 @@
#include "mce_amd.h"
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
static u8 xec_mask = 0xf;
@@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = {
"L2 Fill Data error",
};
+static const char * const smca_ls2_mce_desc[] = {
+ "An ECC error was detected on a data cache read by a probe or victimization",
+ "An ECC error or L2 poison was detected on a data cache read by a load",
+ "An ECC error was detected on a data cache read-modify-write by a store",
+ "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+ "An ECC error or poison bit mismatch was detected on a tag read by a load",
+ "An ECC error or poison bit mismatch was detected on a tag read by a store",
+ "An ECC error was detected on an EMEM read by a load",
+ "An ECC error was detected on an EMEM read-modify-write by a store",
+ "A parity error was detected in an L1 TLB entry by any access",
+ "A parity error was detected in an L2 TLB entry by any access",
+ "A parity error was detected in a PWC entry by any access",
+ "A parity error was detected in an STQ entry by any access",
+ "A parity error was detected in an LDQ entry by any access",
+ "A parity error was detected in a MAB entry by any access",
+ "A parity error was detected in an SCB entry state field by any access",
+ "A parity error was detected in an SCB entry address field by any access",
+ "A parity error was detected in an SCB entry data field by any access",
+ "A parity error was detected in a WCB entry by any access",
+ "A poisoned line was detected in an SCB entry by any access",
+ "A SystemReadDataError error was reported on read data returned from L2 for a load",
+ "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+ "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+ "A hardware assertion error was reported",
+ "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
static const char * const smca_if_mce_desc[] = {
"Op Cache Microtag Probe Port Parity Error",
"IC Microtag or Full Tag Multi-hit Error",
@@ -378,6 +405,7 @@ struct smca_mce_desc {
static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
+ [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
[SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
[SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
[SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
@@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m)
: (xec ? "multimatch" : "parity")));
return;
}
- } else if (fam_ops->mc0_mce(ec, xec))
+ } else if (fam_ops.mc0_mce(ec, xec))
;
else
pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
@@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m)
pr_cont("Hardware Assert.\n");
else
goto wrong_mc1_mce;
- } else if (fam_ops->mc1_mce(ec, xec))
+ } else if (fam_ops.mc1_mce(ec, xec))
;
else
goto wrong_mc1_mce;
@@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m)
pr_emerg(HW_ERR "MC2 Error: ");
- if (!fam_ops->mc2_mce(ec, xec))
+ if (!fam_ops.mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
@@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->tsc)
pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
- if (!fam_ops)
+ /* Doesn't matter which member to test. */
+ if (!fam_ops.mc0_mce)
goto err_code;
switch (m->bank) {
@@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void)
c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
- fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
- if (!fam_ops)
- return -ENOMEM;
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ xec_mask = 0x3f;
+ goto out;
+ }
switch (c->x86) {
case 0xf:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x10:
- fam_ops->mc0_mce = f10h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f10h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x11:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x12:
- fam_ops->mc0_mce = f12h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f12h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x14:
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
- fam_ops->mc0_mce = f15h_mc0_mce;
- fam_ops->mc1_mce = f15h_mc1_mce;
- fam_ops->mc2_mce = f15h_mc2_mce;
+ fam_ops.mc0_mce = f15h_mc0_mce;
+ fam_ops.mc1_mce = f15h_mc1_mce;
+ fam_ops.mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = f16h_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = f16h_mc2_mce;
break;
case 0x17:
case 0x18:
- xec_mask = 0x3f;
- if (!boot_cpu_has(X86_FEATURE_SMCA)) {
- printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
- goto err_out;
- }
- break;
+ pr_warn("Decoding supported only on Scalable MCA processors.\n");
+ return -EINVAL;
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- goto err_out;
+ return -EINVAL;
}
+out:
pr_info("MCE: In-kernel MCE decoding enabled.\n");
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
-
-err_out:
- kfree(fam_ops);
- fam_ops = NULL;
- return -EINVAL;
}
early_initcall(mce_amd_init);
@@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init);
static void __exit mce_amd_exit(void)
{
mce_unregister_decode_chain(&amd_mce_dec_nb);
- kfree(fam_ops);
}
MODULE_DESCRIPTION("AMD MCE decoder");
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index c0cc72a3b2be..3a3dcb14ed99 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
1, 1, NULL, 0,
edac_device_alloc_index());
- if (IS_ERR(p->dci))
- return PTR_ERR(p->dci);
+ if (!p->dci)
+ return -ENOMEM;
p->dci->dev = &pdev->dev;
p->dci->mod_name = "Sifive ECC Manager";
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 95662a4ff4c4..99bbaf629b8d 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+ edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index cc779f3f9e2d..a65e2f78a402 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0cc746673677..6ca2f5ab6c57 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
INIT_LIST_HEAD(&lynx->client_list);
kref_init(&lynx->kref);
- lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+ lynx->registers = ioremap(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
if (lynx->registers == NULL) {
dev_err(&dev->dev, "Failed to map registers\n");
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index da04fdae62a1..835ece9c00f1 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
void __iomem *iobase;
int err;
- iobase = ioremap_nocache(base, lim);
+ iobase = ioremap(base, lim);
if (!iobase)
return -ENOMEM;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index bcc378c19ebe..ecc83e2f032c 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE
Say Y here for Dell EMC PowerEdge systems.
+config EFI_DISABLE_PCI_DMA
+ bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+ help
+ Disable the busmaster bit in the control register on all PCI bridges
+ while calling ExitBootServices() and passing control to the runtime
+ kernel. System firmware may configure the IOMMU to prevent malicious
+ PCI devices from being able to attack the OS via DMA. However, since
+ firmware can't guarantee that the OS is IOMMU-aware, it will tear
+ down IOMMU configuration when ExitBootServices() is called. This
+ leaves a window between where a hostile device could still cause
+ damage before Linux configures the IOMMU again.
+
+ If you say Y here, the EFI stub will clear the busmaster bit on all
+ PCI bridges before ExitBootServices() is called. This will prevent
+ any malicious PCI devices from being able to perform DMA until the
+ kernel reenables busmastering after configuring the IOMMU.
+
+ This option will cause failures with some poorly behaved hardware
+ and should not be enabled without testing. The kernel command line
+ options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
+ may be used to override this option.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 904fa09e6a6b..d99f5b0c8a09 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -10,10 +10,12 @@
#define pr_fmt(fmt) "efi: " fmt
#include <linux/efi.h>
+#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
@@ -276,15 +278,112 @@ void __init efi_init(void)
efi_memmap_unmap();
}
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+ u64 fb_base = screen_info.lfb_base;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+ return fb_base >= range->cpu_addr &&
+ fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ err = of_pci_range_parser_init(&parser, np);
+ if (err) {
+ pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+ continue;
+ }
+
+ for_each_of_pci_range(&parser, &range)
+ if (efifb_overlaps_pci_range(&range))
+ return np;
+ }
+ return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ struct device_node *sup_np;
+ struct device *sup_dev;
+
+ sup_np = find_pci_overlap_node();
+
+ /*
+ * If there's no PCI graphics controller backing the efifb, we are
+ * done here.
+ */
+ if (!sup_np)
+ return 0;
+
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ of_node_put(sup_np);
+
+ /*
+ * Return -ENODEV if the PCI graphics controller device hasn't been
+ * registered yet. This ensures that efifb isn't allowed to probe
+ * and this function is retried again when new devices are
+ * registered.
+ */
+ if (!sup_dev)
+ return -ENODEV;
+
+ /*
+ * If this fails, retrying this function at a later point won't
+ * change anything. So, don't return an error after this.
+ */
+ if (!device_link_add(dev, sup_dev, 0))
+ dev_warn(dev, "device_link_add() failed\n");
+
+ put_device(sup_dev);
+
+ return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+ .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+ .ops = &efifb_fwnode_ops,
+};
+
static int __init register_gop_device(void)
{
- void *pd;
+ struct platform_device *pd;
+ int err;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return 0;
- pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
- &screen_info, sizeof(screen_info));
- return PTR_ERR_OR_ZERO(pd);
+ pd = platform_device_alloc("efi-framebuffer", 0);
+ if (!pd)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PCI))
+ pd->dev.fwnode = &efifb_fwnode;
+
+ err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+ if (err)
+ return err;
+
+ return platform_device_add(pd);
}
subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index b1395133389e..d3067cbd5114 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index d4077db6dc97..5d4f84781aa0 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -17,7 +17,7 @@ static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
static u32 efi_x, efi_y;
static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
static void *efi_fb;
/*
@@ -33,10 +33,8 @@ static int __init efi_earlycon_remap_fb(void)
if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
return 0;
- if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL))
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB);
- else
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC);
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
return efi_fb ? 0 : -ENOMEM;
}
@@ -53,9 +51,12 @@ late_initcall(efi_earlycon_unmap_fb);
static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
{
+ pgprot_t fb_prot;
+
if (efi_fb)
return efi_fb + start;
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
}
@@ -215,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- if (opt && !strcmp(opt, "ram"))
- fb_prot = PAGE_KERNEL;
- else
- fb_prot = pgprot_writecombine(PAGE_KERNEL);
+ fb_wb = opt && !strcmp(opt, "ram");
si = &screen_info;
xres = si->lfb_width;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2b02cb165f16..621220ab3d0e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr)
*
* Search in the EFI memory map for the region covering @phys_addr.
* Returns the EFI memory type if the region was found in the memory
- * map, EFI_RESERVED_TYPE (zero) otherwise.
+ * map, -EINVAL otherwise.
*/
int efi_mem_type(unsigned long phys_addr)
{
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index bb9fc70d0cfa..6e0f34a38171 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
return 0;
}
-void __init efi_fake_memmap(void)
+static void __init efi_fake_range(struct efi_mem_range *efi_range)
{
+ struct efi_memory_map_data data = { 0 };
int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
- phys_addr_t new_memmap_phy;
void *new_memmap;
- int i;
-
- if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
- return;
/* count up the number of EFI memory descriptor */
- for (i = 0; i < nr_fake_mem; i++) {
- for_each_efi_memory_desc(md) {
- struct range *r = &efi_fake_mems[i].range;
-
- new_nr_map += efi_memmap_split_count(md, r);
- }
- }
+ for_each_efi_memory_desc(md)
+ new_nr_map += efi_memmap_split_count(md, &efi_range->range);
/* allocate memory for new EFI memmap */
- new_memmap_phy = efi_memmap_alloc(new_nr_map);
- if (!new_memmap_phy)
+ if (efi_memmap_alloc(new_nr_map, &data) != 0)
return;
/* create new EFI memmap */
- new_memmap = early_memremap(new_memmap_phy,
- efi.memmap.desc_size * new_nr_map);
+ new_memmap = early_memremap(data.phys_map, data.size);
if (!new_memmap) {
- memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+ __efi_memmap_free(data.phys_map, data.size, data.flags);
return;
}
- for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
/* swap into new EFI memmap */
- early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+ early_memunmap(new_memmap, data.size);
+
+ efi_memmap_install(&data);
+}
+
+void __init efi_fake_memmap(void)
+{
+ int i;
- efi_memmap_install(new_memmap_phy, new_nr_map);
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ for (i = 0; i < nr_fake_mem; i++)
+ efi_fake_range(&efi_fake_mems[i]);
/* print new EFI memmap */
efi_print_memmap();
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c35f893897e1..98a81576213d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
- random.o
+ random.o pci.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 817237ce2420..7bbef4a67350 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -37,16 +37,14 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
+static efi_system_table_t *__efistub_global sys_table;
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- out->output_string(out, str);
+__pure efi_system_table_t *efi_system_table(void)
+{
+ return sys_table;
}
-static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+static struct screen_info *setup_graphics(void)
{
efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
efi_status_t status;
@@ -55,27 +53,27 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
struct screen_info *si = NULL;
size = 0;
- status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info(sys_table_arg);
+ si = alloc_screen_info();
if (!si)
return NULL;
- efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+ efi_setup_gop(si, &gop_proto, size);
}
return si;
}
-void install_memreserve_table(efi_system_table_t *sys_table_arg)
+void install_memreserve_table(void)
{
struct linux_efi_memreserve *rsv;
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
- (void **)&rsv);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+ pr_efi_err("Failed to allocate memreserve entry!\n");
return;
}
@@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
rsv->size = 0;
atomic_set(&rsv->count, 0);
- status = efi_call_early(install_configuration_table,
- &memreserve_table_guid,
- rsv);
+ status = efi_bs_call(install_configuration_table,
+ &memreserve_table_guid, rsv);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+ pr_efi_err("Failed to install memreserve config table!\n");
}
@@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
* must be reserved. On failure it is required to free all
* allocations it has made.
*/
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
unsigned long *image_addr)
{
efi_loaded_image_t *image;
@@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
enum efi_secureboot_mode secure_boot;
struct screen_info *si;
+ sys_table = sys_table_arg;
+
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- status = check_platform_features(sys_table);
+ status = check_platform_features();
if (status != EFI_SUCCESS)
goto fail;
@@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
status = sys_table->boottime->handle_protocol(handle,
&loaded_image_proto, (void *)&image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to get loaded image protocol\n");
+ pr_efi_err("Failed to get loaded image protocol\n");
goto fail;
}
- dram_base = get_dram_base(sys_table);
+ dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
- pr_efi_err(sys_table, "Failed to find DRAM base\n");
+ pr_efi_err("Failed to find DRAM base\n");
goto fail;
}
@@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
if (!cmdline_ptr) {
- pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
+ pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
goto fail;
}
@@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
efi_parse_options(cmdline_ptr);
- pr_efi(sys_table, "Booting Linux Kernel...\n");
+ pr_efi("Booting Linux Kernel...\n");
- si = setup_graphics(sys_table);
+ si = setup_graphics();
- status = handle_kernel_image(sys_table, image_addr, &image_size,
+ status = handle_kernel_image(image_addr, &image_size,
&reserve_addr,
&reserve_size,
dram_base, image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
goto fail_free_cmdline;
}
- efi_retrieve_tpm2_eventlog(sys_table);
+ efi_retrieve_tpm2_eventlog();
/* Ask the firmware to clear memory on unclean shutdown */
- efi_enable_reset_attack_mitigation(sys_table);
+ efi_enable_reset_attack_mitigation();
- secure_boot = efi_get_secureboot(sys_table);
+ secure_boot = efi_get_secureboot();
/*
* Unauthenticated device tree data is a security hazard, so ignore
@@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
secure_boot != efi_secureboot_mode_disabled) {
if (strstr(cmdline_ptr, "dtb="))
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ pr_efi("Ignoring DTB from command line.\n");
} else {
- status = handle_cmdline_files(sys_table, image, cmdline_ptr,
- "dtb=",
+ status = handle_cmdline_files(image, cmdline_ptr, "dtb=",
~0UL, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to load device tree!\n");
+ pr_efi_err("Failed to load device tree!\n");
goto fail_free_image;
}
}
if (fdt_addr) {
- pr_efi(sys_table, "Using DTB from command line\n");
+ pr_efi("Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
if (fdt_addr)
- pr_efi(sys_table, "Using DTB from configuration table\n");
+ pr_efi("Using DTB from configuration table\n");
}
if (!fdt_addr)
- pr_efi(sys_table, "Generating empty DTB\n");
+ pr_efi("Generating empty DTB\n");
- status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=",
+ status = handle_cmdline_files(image, cmdline_ptr, "initrd=",
efi_get_max_initrd_addr(dram_base,
*image_addr),
(unsigned long *)&initrd_addr,
(unsigned long *)&initrd_size);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ pr_efi_err("Failed initrd from command line!\n");
- efi_random_get_seed(sys_table);
+ efi_random_get_seed();
/* hibernation expects the runtime regions to stay in the same place */
if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
@@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
EFI_RT_VIRTUAL_SIZE;
u32 rnd;
- status = efi_get_random_bytes(sys_table, sizeof(rnd),
- (u8 *)&rnd);
+ status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
if (status == EFI_SUCCESS) {
virtmap_base = EFI_RT_VIRTUAL_BASE +
(((headroom >> 21) * rnd) >> (32 - 21));
}
}
- install_memreserve_table(sys_table);
+ install_memreserve_table();
new_fdt_addr = fdt_addr;
- status = allocate_new_fdt_and_exit_boot(sys_table, handle,
+ status = allocate_new_fdt_and_exit_boot(handle,
&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
initrd_addr, initrd_size, cmdline_ptr,
fdt_addr, fdt_size);
@@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status == EFI_SUCCESS)
return new_fdt_addr;
- pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n");
+ pr_efi_err("Failed to update FDT and exit boot services\n");
- efi_free(sys_table, initrd_size, initrd_addr);
- efi_free(sys_table, fdt_size, fdt_addr);
+ efi_free(initrd_size, initrd_addr);
+ efi_free(fdt_size, fdt_addr);
fail_free_image:
- efi_free(sys_table, image_size, *image_addr);
- efi_free(sys_table, reserve_size, reserve_addr);
+ efi_free(image_size, *image_addr);
+ efi_free(reserve_size, reserve_addr);
fail_free_cmdline:
- free_screen_info(sys_table, si);
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+ free_screen_info(si);
+ efi_free(cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
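The KASLR block above turns a 32-bit random value into an offset within the headroom of the runtime-services virtual window: the 2 MiB (1 << 21) granule count times rnd, shifted down by (32 - 21), yields the fraction rnd / 2^32 of the headroom. A standalone demonstration of that fixed-point scaling (illustrative values, not the kernel's):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t headroom = 1ULL << 30; /* assume 1 GiB of slack */
                uint32_t rnd = 0x89abcdef;      /* assume this RNG output */

                /* (headroom >> 21) granules scaled by rnd / 2^32: */
                uint64_t offset = ((headroom >> 21) * rnd) >> (32 - 21);

                /* offset < headroom holds for any 32-bit rnd */
                printf("offset = %#llx\n", (unsigned long long)offset);
                return 0;
        }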
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 4566640de650..7b2a6382b647 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -7,7 +7,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
int block;
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
/* LPAE kernels need compatible hardware */
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
- pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ pr_efi_err("This LPAE kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
@@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+struct screen_info *alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
@@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
* its contents while we hand over to the kernel proper from the
* decompressor.
*/
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*si), (void **)&si);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
if (status != EFI_SUCCESS)
return NULL;
- status = efi_call_early(install_configuration_table,
- &screen_info_guid, si);
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
if (status == EFI_SUCCESS)
return si;
- efi_call_early(free_pool, si);
+ efi_bs_call(free_pool, si);
return NULL;
}
-void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+void free_screen_info(struct screen_info *si)
{
if (!si)
return;
- efi_call_early(install_configuration_table, &screen_info_guid, NULL);
- efi_call_early(free_pool, si);
+ efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+ efi_bs_call(free_pool, si);
}
-static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
- unsigned long dram_base,
+static efi_status_t reserve_kernel_base(unsigned long dram_base,
unsigned long *reserve_addr,
unsigned long *reserve_size)
{
@@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
*/
alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
if (status == EFI_SUCCESS) {
if (alloc_addr == dram_base) {
*reserve_addr = alloc_addr;
@@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
* released to the OS after ExitBootServices(), the decompressor can
* safely overwrite them.
*/
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): Unable to retrieve memory map.\n");
+ pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
return status;
}
@@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
start = max(start, (u64)dram_base);
end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- (end - start) / EFI_PAGE_SIZE,
- &start);
+ status = efi_bs_call(allocate_pages,
+ EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ (end - start) / EFI_PAGE_SIZE,
+ &start);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): alloc failed.\n");
+ pr_efi_err("reserve_kernel_base(): alloc failed.\n");
goto out;
}
break;
@@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
status = EFI_SUCCESS;
out:
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
*/
kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
- status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
- reserve_size);
+ status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+ pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
@@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* memory window.
*/
*image_size = image->image_size;
- status = efi_relocate_kernel(sys_table, image_addr, *image_size,
- *image_size,
+ status = efi_relocate_kernel(image_addr, *image_size, *image_size,
kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
return status;
}
@@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* address at which the zImage is loaded.
*/
if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
- pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel, no low memory available.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
- efi_free(sys_table, *image_size, *image_addr);
+ efi_free(*image_size, *image_addr);
*image_size = 0;
return EFI_LOAD_ERROR;
}
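Throughout these hunks, efi_call_early(f, ...) becomes efi_bs_call(f, ...), which needs no table argument because the system table pointer is stashed once at entry. A sketch of the native-case plumbing this relies on (the global and accessor are the ones added in arm-stub.c above; the call macros are arch-provided, and the x86 versions additionally dispatch 32-bit "mixed mode" firmware):

        static efi_system_table_t *__efistub_global sys_table;

        __pure efi_system_table_t *efi_system_table(void)
        {
                return sys_table;       /* set once in efi_entry() */
        }

        #define efi_bs_call(func, ...) \
                efi_system_table()->boottime->func(__VA_ARGS__)

        #define efi_rt_call(func, ...) \
                efi_system_table()->runtime->func(__VA_ARGS__)

Because the accessor is __pure, the compiler may load the table pointer once and reuse it across consecutive boot-services calls.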
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1550d244e996..2915b44132e6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -21,7 +21,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
u64 tg;
@@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
- pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
if (!nokaslr()) {
- status = efi_get_random_bytes(sys_table_arg,
- sizeof(phys_seed),
+ status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
} else if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ pr_efi_err("efi_get_random_bytes() failed\n");
return status;
}
} else {
- pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ pr_efi("KASLR disabled on kernel command line\n");
}
}
@@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
* locate the kernel at a randomized offset in physical memory.
*/
*reserve_size = kernel_memsize + offset;
- status = efi_random_alloc(sys_table_arg, *reserve_size,
+ status = efi_random_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr,
(u32)phys_seed);
@@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
*image_addr = *reserve_addr = preferred_offset;
*reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
- (efi_physical_addr_t *)reserve_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
+ (efi_physical_addr_t *)reserve_addr);
}
if (status != EFI_SUCCESS) {
*reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, *reserve_size,
+ status = efi_low_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e02579907f2e..74ddfb496140 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -27,24 +27,26 @@
*/
#define EFI_READ_CHUNK_SIZE (1024 * 1024)
-static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE;
-static int __section(.data) __nokaslr;
-static int __section(.data) __quiet;
-static int __section(.data) __novamap;
-static bool __section(.data) efi_nosoftreserve;
+static bool __efistub_global efi_nokaslr;
+static bool __efistub_global efi_quiet;
+static bool __efistub_global efi_novamap;
+static bool __efistub_global efi_nosoftreserve;
+static bool __efistub_global efi_disable_pci_dma =
+ IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
-int __pure nokaslr(void)
+bool __pure nokaslr(void)
{
- return __nokaslr;
+ return efi_nokaslr;
}
-int __pure is_quiet(void)
+bool __pure is_quiet(void)
{
- return __quiet;
+ return efi_quiet;
}
-int __pure novamap(void)
+bool __pure novamap(void)
{
- return __novamap;
+ return efi_novamap;
}
bool __pure __efi_soft_reserve_enabled(void)
{
@@ -58,7 +60,7 @@ struct file_info {
u64 size;
};
-void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+void efi_printk(char *str)
{
char *s8;
@@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
ch[0] = *s8;
if (*s8 == '\n') {
efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(sys_table_arg, nl);
+ efi_char16_printk(nl);
}
- efi_char16_printk(sys_table_arg, ch);
+ efi_char16_printk(ch);
}
}
@@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size,
return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
@@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
*map->map_size = *map->desc_size * 32;
*map->buff_size = *map->map_size;
again:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
*map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL ||
!mmap_has_headroom(*map->buff_size, *map->map_size,
*map->desc_size)) {
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
/*
* Make sure there are some entries of headroom so that the
* buffer can be reused for a new map after allocations are
@@ -122,7 +123,7 @@ again:
}
if (status != EFI_SUCCESS)
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
if (map->key_ptr && status == EFI_SUCCESS)
*map->key_ptr = key;
@@ -135,7 +136,7 @@ fail:
}
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+unsigned long get_dram_base(void)
{
efi_status_t status;
unsigned long map_size, buff_size;
@@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
return membase;
@@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
}
}
- efi_call_early(free_pool, map.map);
+ efi_bs_call(free_pool, map.map);
return membase;
}
@@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
/*
* Allocate at the highest possible address that is not above 'max'.
*/
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
unsigned long map_size, desc_size, buff_size;
@@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -251,9 +251,8 @@ again:
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &max_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
max = max_addr;
max_addr = 0;
@@ -263,7 +262,7 @@ again:
*addr = max_addr;
}
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
@@ -271,8 +270,7 @@ fail:
/*
* Allocate at the lowest possible address that is not below 'min'.
*/
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
@@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if ((start + size) > end)
continue;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
if (status == EFI_SUCCESS) {
*addr = start;
break;
@@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr)
+void efi_free(unsigned long size, unsigned long addr)
{
unsigned long nr_pages;
@@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
return;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- efi_call_early(free_pages, addr, nr_pages);
+ efi_bs_call(free_pages, addr, nr_pages);
}
-static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
- efi_char16_t *filename_16, void **handle,
- u64 *file_sz)
+static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16,
+ void **handle, u64 *file_sz)
{
efi_file_handle_t *h, *fh = __fh;
efi_file_info_t *info;
@@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
efi_guid_t info_guid = EFI_FILE_INFO_ID;
unsigned long info_sz;
- status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
+ status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ efi_printk("Failed to open file: ");
+ efi_char16_printk(filename_16);
+ efi_printk("\n");
return status;
}
*handle = h;
info_sz = 0;
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, NULL);
+ status = h->get_info(h, &info_guid, &info_sz, NULL);
if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
+ efi_printk("Failed to get file info size\n");
return status;
}
grow:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- info_sz, (void **)&info);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz,
+ (void **)&info);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ efi_printk("Failed to alloc mem for file info\n");
return status;
}
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, info);
+ status = h->get_info(h, &info_guid, &info_sz, info);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
goto grow;
}
*file_sz = info->file_size;
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to get initrd info\n");
+ efi_printk("Failed to get initrd info\n");
return status;
}
-static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+static efi_status_t efi_file_read(efi_file_handle_t *handle,
+ unsigned long *size, void *addr)
{
- return efi_call_proto(efi_file_handle, read, handle, size, addr);
+ return handle->read(handle, size, addr);
}
-static efi_status_t efi_file_close(void *handle)
+static efi_status_t efi_file_close(efi_file_handle_t *handle)
{
- return efi_call_proto(efi_file_handle, close, handle);
+ return handle->close(handle);
}
-static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
efi_file_handle_t **__fh)
{
efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
efi_status_t status;
- void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
- device_handle,
- image);
+ efi_handle_t handle = image->device_handle;
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
+ status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ efi_printk("Failed to handle fs_proto\n");
return status;
}
- status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ status = io->open_volume(io, &fh);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
+ efi_printk("Failed to open volume\n");
else
*__fh = fh;
@@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline)
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __nokaslr = 1;
+ efi_nokaslr = true;
str = strstr(cmdline, "quiet");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __quiet = 1;
+ efi_quiet = true;
/*
* If no EFI parameters were specified on the cmdline we've got
@@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline)
while (*str && *str != ' ') {
if (!strncmp(str, "nochunk", 7)) {
str += strlen("nochunk");
- __chunk_size = -1UL;
+ efi_chunk_size = -1UL;
}
if (!strncmp(str, "novamap", 7)) {
str += strlen("novamap");
- __novamap = 1;
+ efi_novamap = true;
}
if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
!strncmp(str, "nosoftreserve", 7)) {
str += strlen("nosoftreserve");
- efi_nosoftreserve = 1;
+ efi_nosoftreserve = true;
+ }
+
+ if (!strncmp(str, "disable_early_pci_dma", 21)) {
+ str += strlen("disable_early_pci_dma");
+ efi_disable_pci_dma = true;
+ }
+
+ if (!strncmp(str, "no_disable_early_pci_dma", 24)) {
+ str += strlen("no_disable_early_pci_dma");
+ efi_disable_pci_dma = false;
}
/* Group words together, delimited by "," */
@@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline)
* We only support loading a file from the same filesystem as
* the kernel image.
*/
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
char *cmd_line, char *option_string,
unsigned long max_addr,
unsigned long *load_addr,
@@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- nr_files * sizeof(*files), (void **)&files);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n");
+ pr_efi_err("Failed to alloc mem for file handle list\n");
goto fail;
}
@@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image, &fh);
+ status = efi_open_volume(image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
- status = efi_file_size(sys_table_arg, fh, filename_16,
- (void **)&file->handle, &file->size);
+ status = efi_file_size(fh, filename_16, (void **)&file->handle,
+ &file->size);
if (status != EFI_SUCCESS)
goto close_handles;
@@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
* so allocate enough memory for all the files. This is used
* for loading multiple files.
*/
- status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
- &file_addr, max_addr);
+ status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
+ max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n");
+ pr_efi_err("Failed to alloc highmem for files\n");
goto close_handles;
}
/* We've run out of free low memory. */
if (file_addr > max_addr) {
- pr_efi_err(sys_table_arg, "We've run out of free low memory\n");
+ pr_efi_err("We've run out of free low memory\n");
status = EFI_INVALID_PARAMETER;
goto free_file_total;
}
@@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
while (size) {
unsigned long chunksize;
- if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
- chunksize = __chunk_size;
+ if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size)
+ chunksize = efi_chunk_size;
else
chunksize = size;
@@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
&chunksize,
(void *)addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to read file\n");
+ pr_efi_err("Failed to read file\n");
goto free_file_total;
}
addr += chunksize;
@@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
}
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
return status;
free_file_total:
- efi_free(sys_table_arg, file_size_total, file_addr);
+ efi_free(file_size_total, file_addr);
close_handles:
for (k = j; k < i; k++)
efi_file_close(files[k].handle);
free_files:
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -707,8 +705,7 @@ fail:
* address is not available the lowest available address will
* be used.
*/
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
@@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &efi_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
new_addr = efi_addr;
/*
* If preferred address allocation failed allocate as low as
* possible.
*/
if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(sys_table_arg, alloc_size,
- alignment, &new_addr, min_addr);
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
}
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
+ pr_efi_err("Failed to allocate usable memory for kernel.\n");
return status;
}
@@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+char *efi_convert_cmdline(efi_loaded_image_t *image,
int *cmd_line_len)
{
const u16 *s2;
@@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_high_alloc(sys_table_arg, options_bytes, 0,
- &cmdline_addr, MAX_CMDLINE_ADDRESS);
+ status = efi_high_alloc(options_bytes, 0, &cmdline_addr,
+ MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
@@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
*/
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
- void *handle,
+efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func)
{
efi_status_t status;
- status = efi_get_memory_map(sys_table_arg, map);
+ status = efi_get_memory_map(map);
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
if (status != EFI_SUCCESS)
goto free_map;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
if (status == EFI_INVALID_PARAMETER) {
/*
@@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
* to get_memory_map() is expected to succeed here.
*/
*map->map_size = *map->buff_size;
- status = efi_call_early(get_memory_map,
- map->map_size,
- *map->map,
- map->key_ptr,
- map->desc_size,
- map->desc_ver);
+ status = efi_bs_call(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
}
/* exit_boot_services() was called, thus cannot free */
@@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
return EFI_SUCCESS;
free_map:
- efi_call_early(free_pool, *map->map);
+ efi_bs_call(free_pool, *map->map);
fail:
return status;
}
-#define GET_EFI_CONFIG_TABLE(bits) \
-static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
- efi_guid_t guid) \
-{ \
- efi_system_table_##bits##_t *sys_table; \
- efi_config_table_##bits##_t *tables; \
- int i; \
- \
- sys_table = (typeof(sys_table))_sys_table; \
- tables = (typeof(tables))(unsigned long)sys_table->tables; \
- \
- for (i = 0; i < sys_table->nr_tables; i++) { \
- if (efi_guidcmp(tables[i].guid, guid) != 0) \
- continue; \
- \
- return (void *)(unsigned long)tables[i].table; \
- } \
- \
- return NULL; \
+void *get_efi_config_table(efi_guid_t guid)
+{
+ unsigned long tables = efi_table_attr(efi_system_table(), tables);
+ int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_config_table_t *t = (void *)tables;
+
+ if (efi_guidcmp(t->guid, guid) == 0)
+ return efi_table_attr(t, table);
+
+ tables += efi_is_native() ? sizeof(efi_config_table_t)
+ : sizeof(efi_config_table_32_t);
+ }
+ return NULL;
}
-GET_EFI_CONFIG_TABLE(32)
-GET_EFI_CONFIG_TABLE(64)
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+void efi_char16_printk(efi_char16_t *str)
{
- if (efi_is_64bit())
- return get_efi_config_table64(sys_table, guid);
- else
- return get_efi_config_table32(sys_table, guid);
+ efi_call_proto(efi_table_attr(efi_system_table(), con_out),
+ output_string, str);
}
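get_efi_config_table() and efi_char16_printk() above lean on efi_table_attr() to read a structure member in a form that also works when a 64-bit kernel runs on 32-bit firmware. In the native case it is assumed to reduce to a plain member access, roughly:

        /* Native-case sketch (assumption; the mixed-mode variant reads
         * the member from the protocol's 32-bit layout instead): */
        #define efi_table_attr(inst, attr)      ((inst)->attr)

That abstraction covers member access only, which is why the config-table walk must still advance the cursor by sizeof(efi_config_table_32_t) on non-native firmware: the array stride differs even when the member read is hidden.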
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 05739ae013c8..c244b165005e 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -25,22 +25,30 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-extern int __pure nokaslr(void);
-extern int __pure is_quiet(void);
-extern int __pure novamap(void);
+#ifdef CONFIG_ARM
+#define __efistub_global __section(.data)
+#else
+#define __efistub_global
+#endif
+
+extern bool __pure nokaslr(void);
+extern bool __pure is_quiet(void);
+extern bool __pure novamap(void);
+
+extern __pure efi_system_table_t *efi_system_table(void);
-#define pr_efi(sys_table, msg) do { \
- if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
+#define pr_efi(msg) do { \
+ if (!is_quiet()) efi_printk("EFI stub: "msg); \
} while (0)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
-void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+void efi_char16_printk(efi_char16_t *);
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
+unsigned long get_dram_base(void);
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size);
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
+void *get_fdt(unsigned long *fdt_size);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
- unsigned long size, u8 *out);
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t check_platform_features(void);
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+void *get_efi_config_table(efi_guid_t guid);
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
@@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
#endif
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
#endif
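A hypothetical usage sketch for the new variable accessors (the GUID and variable name come from the UEFI spec; the wide-string literal assumes the stub's -fshort-wchar build, and real callers add attribute and buffer handling):

        efi_guid_t global_guid = EFI_GLOBAL_VARIABLE_GUID;
        u8 secboot;
        unsigned long size = sizeof(secboot);
        efi_status_t status;

        /* get_variable(name, vendor, attributes, data_size, data) */
        status = get_efi_var(L"SecureBoot", &global_guid, NULL,
                             &size, &secboot);
        if (status == EFI_SUCCESS && secboot == 1)
                pr_efi("Secure boot enabled\n");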
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0bf0190917e0..0a91e5232127 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,7 +16,7 @@
#define EFI_DT_ADDR_CELLS_DEFAULT 2
#define EFI_DT_SIZE_CELLS_DEFAULT 2
-static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+static void fdt_update_cell_size(void *fdt)
{
int offset;
@@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
}
-static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
u64 initrd_addr, u64 initrd_size)
{
@@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
- pr_efi_err(sys_table, "Device Tree header not valid!\n");
+ pr_efi_err("Device Tree header not valid!\n");
return EFI_LOAD_ERROR;
}
/*
@@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
- pr_efi_err(sys_table, "Truncated device tree! foo!\n");
+ pr_efi_err("Truncated device tree! foo!\n");
return EFI_LOAD_ERROR;
}
}
@@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* Any failure from the following function is
* non-critical:
*/
- fdt_update_cell_size(sys_table, fdt);
+ fdt_update_cell_size(fdt);
}
}
@@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
@@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
efi_status_t efi_status;
- efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ efi_status = efi_get_random_bytes(sizeof(fdt_val64),
(u8 *)&fdt_val64);
if (efi_status == EFI_SUCCESS) {
status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
@@ -210,8 +209,7 @@ struct exit_boot_struct {
void *new_fdt_addr;
};
-static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
void *priv)
{
struct exit_boot_struct *p = priv;
@@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
* with the final memory map in it.
*/
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ pr_efi_err("Unable to retrieve UEFI memory map.\n");
return status;
}
- pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
+ pr_efi("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
+ status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN,
new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
+ pr_efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* Now that we have done our final memory allocation (and free)
* we can get the memory map key needed for exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
- status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
+ status = update_fdt((void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
initrd_addr, initrd_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to construct new device tree.\n");
+ pr_efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
@@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_entry_count = &runtime_entry_count;
priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
@@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
/* Install the new virtual address map */
- svam = sys_table->runtime->set_virtual_address_map;
+ svam = efi_system_table()->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
@@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
}
- pr_efi_err(sys_table, "Exit boot services failed.\n");
+ pr_efi_err("Exit boot services failed.\n");
fail_free_new_fdt:
- efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr);
+ efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- sys_table->boottime->free_pool(runtime_map);
+ efi_system_table()->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
+void *get_fdt(unsigned long *fdt_size)
{
void *fdt;
- fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
return NULL;
if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
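For reference, the chosen-node update above goes through the fdt_setprop_var() wrapper from efistub.h; written out with libfdt directly, the system-table property amounts to (a sketch; the value is computed at boot):

        u64 fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
        int node = fdt_subnode_offset(fdt, 0, "chosen");

        /* what fdt_setprop_var(fdt, node, name, fdt_val64) expands to */
        fdt_setprop(fdt, node, "linux,uefi-system-table",
                    &fdt_val64, sizeof(fdt_val64));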
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index b7bf1e993b8b..55e6b3f286fe 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -10,6 +10,8 @@
#include <asm/efi.h>
#include <asm/setup.h>
+#include "efistub.h"
+
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
{
u8 first, len;
@@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
static void
setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
- struct efi_pixel_bitmask pixel_info, int pixel_format)
+ efi_pixel_bitmask_t pixel_info, int pixel_format)
{
if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->lfb_depth = 32;
@@ -83,48 +85,42 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_status_t
-setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
{
- struct efi_graphics_output_protocol_32 *gop32, *first_gop;
- unsigned long nr_gops;
+ efi_graphics_output_protocol_t *gop, *first_gop;
u16 width, height;
u32 pixels_per_scan_line;
u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
+ efi_physical_addr_t fb_base;
+ efi_pixel_bitmask_t pixel_info;
int pixel_format;
efi_status_t status;
- u32 *handles = (u32 *)(unsigned long)gop_handle;
+ efi_handle_t h;
int i;
first_gop = NULL;
- gop32 = NULL;
+ gop = NULL;
- nr_gops = size / sizeof(u32);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_protocol_mode_32 *mode;
- struct efi_graphics_output_mode_info *info = NULL;
+ for_each_efi_handle(h, handles, size, i) {
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
+ efi_physical_addr_t current_fb_base;
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop32);
+ status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
if (status != EFI_SUCCESS)
continue;
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
if (status == EFI_SUCCESS)
conout_found = true;
- mode = (void *)(unsigned long)gop32->mode;
- info = (void *)(unsigned long)mode->info;
- current_fb_base = mode->frame_buffer_base;
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ current_fb_base = efi_table_attr(mode, frame_buffer_base);
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
@@ -146,104 +142,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- first_gop = gop32;
- if (conout_found)
- break;
- }
- }
-
- /* Did we find any GOPs? */
- if (!first_gop)
- return EFI_NOT_FOUND;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
-
- ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
- if (ext_lfb_base) {
- si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- si->ext_lfb_base = ext_lfb_base;
- }
-
- si->pages = 1;
-
- setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
- si->lfb_size = si->lfb_linelength * si->lfb_height;
-
- si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-
- return EFI_SUCCESS;
-}
-
-static efi_status_t
-setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
-{
- struct efi_graphics_output_protocol_64 *gop64, *first_gop;
- unsigned long nr_gops;
- u16 width, height;
- u32 pixels_per_scan_line;
- u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
- int pixel_format;
- efi_status_t status;
- u64 *handles = (u64 *)(unsigned long)gop_handle;
- int i;
-
- first_gop = NULL;
- gop64 = NULL;
-
- nr_gops = size / sizeof(u64);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_protocol_mode_64 *mode;
- struct efi_graphics_output_mode_info *info = NULL;
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
- void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
-
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop64);
- if (status != EFI_SUCCESS)
- continue;
-
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
- if (status == EFI_SUCCESS)
- conout_found = true;
-
- mode = (void *)(unsigned long)gop64->mode;
- info = (void *)(unsigned long)mode->info;
- current_fb_base = mode->frame_buffer_base;
-
- if ((!first_gop || conout_found) &&
- info->pixel_format != PIXEL_BLT_ONLY) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
- fb_base = current_fb_base;
-
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
- first_gop = gop64;
+ first_gop = gop;
if (conout_found)
break;
}
@@ -280,33 +179,25 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/*
* See if we have Graphics Output Protocol
*/
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size)
{
efi_status_t status;
void **gop_handle = NULL;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&gop_handle);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&gop_handle);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+ &size, gop_handle);
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_is_64bit()) {
- status = setup_gop64(sys_table_arg, si, proto, size,
- gop_handle);
- } else {
- status = setup_gop32(sys_table_arg, si, proto, size,
- gop_handle);
- }
+ status = setup_gop(si, proto, size, gop_handle);
free_handle:
- efi_call_early(free_pool, gop_handle);
+ efi_bs_call(free_pool, gop_handle);
return status;
}
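The setup_gop32()/setup_gop64() pair collapses into one function because the only structural difference was the element size of the handle buffer. A conceptual sketch of the iteration for_each_efi_handle() is assumed to provide (expansion inferred from its use here, not authoritative):

        for (i = 0; i < size / (efi_is_native() ? sizeof(efi_handle_t)
                                                : sizeof(u32)); i++) {
                h = efi_is_native()
                        ? ((efi_handle_t *)handles)[i]
                        : (efi_handle_t)(unsigned long)((u32 *)handles)[i];
                /* ... loop body runs once per handle ... */
        }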
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644
index 000000000000..b025e59b94df
--- /dev/null
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long pci_handle_size = 0;
+ efi_handle_t *pci_handle = NULL;
+ efi_handle_t handle;
+ efi_status_t status;
+ u16 class, command;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ return;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+ (void **)&pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ goto free_handle;
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+ unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ /*
+ * Disregard devices living on bus 0 - these are not behind a
+ * bridge so no point in disconnecting them from their drivers.
+ */
+ status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+ &device_nr, &func_nr);
+ if (status != EFI_SUCCESS || bus_nr == 0)
+ continue;
+
+ /*
+ * Don't disconnect VGA controllers so we don't risk losing
+ * access to the framebuffer. Drivers for true PCIe graphics
+ * controllers that are behind a PCIe root port do not use
+ * DMA to implement the GOP framebuffer anyway [although they
+ * may use it in their implementation of Gop->Blt()], and so
+ * disabling DMA in the PCI bridge should not interfere with
+ * normal operation of the device.
+ */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+ if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+ continue;
+
+ /* Disconnect this handle from all its drivers */
+ efi_bs_call(disconnect_controller, handle, NULL, NULL);
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+
+ if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ /* Disable busmastering */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+ continue;
+
+ command &= ~PCI_COMMAND_MASTER;
+ status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to disable PCI busmastering\n");
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
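
The first locate_handle() call above passes a zero-sized buffer on purpose: the firmware answers EFI_BUFFER_TOO_SMALL and writes the required size into pci_handle_size, after which the stub allocates a buffer and repeats the call. A standalone sketch of that two-call idiom, with plain C stand-ins for the firmware service (names and status values here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define EFI_SUCCESS          0
    #define EFI_BUFFER_TOO_SMALL 5  /* stand-in status code */

    /* Pretend the firmware knows about three handles. */
    static const unsigned long fw_handles[] = { 0x1000, 0x2000, 0x3000 };

    static int locate_handle(unsigned long *size, unsigned long *buf)
    {
        unsigned long needed = sizeof(fw_handles);

        if (*size < needed) {
            *size = needed;  /* report the required size */
            return EFI_BUFFER_TOO_SMALL;
        }
        memcpy(buf, fw_handles, needed);
        *size = needed;
        return EFI_SUCCESS;
    }

    int main(void)
    {
        unsigned long size = 0, *buf;

        /* First call: learn how big the buffer must be. */
        if (locate_handle(&size, NULL) != EFI_BUFFER_TOO_SMALL)
            return 1;
        buf = malloc(size);
        /* Second call: actually fetch the handles. */
        if (!buf || locate_handle(&size, buf) != EFI_SUCCESS)
            return 1;
        for (unsigned long i = 0; i < size / sizeof(*buf); i++)
            printf("handle %lu: 0x%lx\n", i, buf[i]);
        free(buf);
        return 0;
    }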
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 35edd7cfb6a1..316ce9ff0193 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -9,38 +9,34 @@
#include "efistub.h"
-typedef struct efi_rng_protocol efi_rng_protocol_t;
-
-typedef struct {
- u32 get_info;
- u32 get_rng;
-} efi_rng_protocol_32_t;
-
-typedef struct {
- u64 get_info;
- u64 get_rng;
-} efi_rng_protocol_64_t;
-
-struct efi_rng_protocol {
- efi_status_t (*get_info)(struct efi_rng_protocol *,
- unsigned long *, efi_guid_t *);
- efi_status_t (*get_rng)(struct efi_rng_protocol *,
- efi_guid_t *, unsigned long, u8 *out);
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+ struct {
+ efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+ unsigned long *,
+ efi_guid_t *);
+ efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+ efi_guid_t *, unsigned long,
+ u8 *out);
+ };
+ struct {
+ u32 get_info;
+ u32 get_rng;
+ } mixed_mode;
};
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
- unsigned long size, u8 *out)
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_status_t status;
- struct efi_rng_protocol *rng;
+ efi_rng_protocol_t *rng = NULL;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
+ return efi_call_proto(rng, get_rng, NULL, size, out);
}
/*
@@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
*/
#define MD_NUM_SLOTS(md) ((md)->virt_addr)
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size,
+efi_status_t efi_random_alloc(unsigned long size,
unsigned long align,
unsigned long *addr,
unsigned long random_seed)
@@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
map.key_ptr = NULL;
map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
return status;
@@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
target = round_up(md->phys_addr, align) + target_slot * align;
pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, pages, &target);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
if (status == EFI_SUCCESS)
*addr = target;
break;
}
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+efi_status_t efi_random_get_seed(void)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
- struct efi_rng_protocol *rng;
- struct linux_efi_random_seed *seed;
+ efi_rng_protocol_t *rng = NULL;
+ struct linux_efi_random_seed *seed = NULL;
efi_status_t status;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
- (void **)&seed);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+ (void **)&seed);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+ status = efi_call_proto(rng, get_rng, &rng_algo_raw,
EFI_RANDOM_SEED_SIZE, seed->bits);
if (status == EFI_UNSUPPORTED)
@@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
- EFI_RANDOM_SEED_SIZE, seed->bits);
+ status = efi_call_proto(rng, get_rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
seed->size = EFI_RANDOM_SEED_SIZE;
- status = efi_call_early(install_configuration_table, &rng_table_guid,
- seed);
+ status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
if (status != EFI_SUCCESS)
goto err_freepool;
return EFI_SUCCESS;
err_freepool:
- efi_call_early(free_pool, seed);
+ efi_bs_call(free_pool, seed);
return status;
}
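
The efi_rng_protocol union introduced above is the pattern this series uses for all protocols: the anonymous first member holds native function pointers, while mixed_mode holds the same slots as raw 32-bit firmware pointers, so efi_call_proto() can pick the right view at run time. A standalone sketch of the overlay with a hypothetical protocol (host C, not firmware code):

    #include <stdio.h>
    #include <stdint.h>

    typedef union demo_proto demo_proto_t;

    union demo_proto {
        struct {  /* native layout: real function pointers */
            int (*get_info)(demo_proto_t *);
            int (*get_rng)(demo_proto_t *, unsigned long, uint8_t *);
        };
        struct {  /* 32-bit firmware layout, same slot order */
            uint32_t get_info;
            uint32_t get_rng;
        } mixed_mode;
    };

    int main(void)
    {
        demo_proto_t p = { .mixed_mode = { 0x1000, 0x2000 } };

        /* Both views alias the same storage. */
        printf("union size: %zu bytes\n", sizeof(p));
        printf("mixed-mode get_info slot: 0x%x\n", p.mixed_mode.get_info);
        return 0;
    }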
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index edba5e7a3743..a765378ad18c 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__);
-
/*
* Determine whether we're in secure boot mode.
*
* Please keep the logic in sync with
* arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+enum efi_secureboot_mode efi_get_secureboot(void)
{
u32 attr;
u8 secboot, setupmode, moksbstate;
@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
- pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+ pr_efi("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
out_efi_err:
- pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+ pr_efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index eb9af83e4d59..1d59e103a2e3 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
#define MEMORY_ONLY_RESET_CONTROL_GUID \
EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
-#define set_efi_var(name, vendor, ...) \
- efi_call_runtime(set_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
/*
* Enable reboot attack mitigation. This requests that the firmware clear the
* RAM on next reboot before proceeding with boot, ensuring that any secrets
* are cleared. If userland has ensured that all secrets have been removed
* from RAM before reboot it can simply reset this variable.
*/
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+void efi_enable_reset_attack_mitigation(void)
{
u8 val = 1;
efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
@@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
#endif
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(void)
{
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
@@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
size_t log_size, last_entry_size;
efi_bool_t truncated;
int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
- void *tcg2_protocol = NULL;
+ efi_tcg2_protocol_t *tcg2_protocol = NULL;
int final_events_size = 0;
- status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
- &tcg2_protocol);
+ status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+ (void **)&tcg2_protocol);
if (status != EFI_SUCCESS)
return;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry, &truncated);
if (status != EFI_SUCCESS || !log_location) {
version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
if (status != EFI_SUCCESS || !log_location)
return;
@@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
}
/* Allocate space for the logs and copy them. */
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- sizeof(*log_tbl) + log_size,
- (void **) &log_tbl);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg,
- "Unable to allocate memory for event log\n");
+ efi_printk("Unable to allocate memory for event log\n");
return;
}
@@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- final_events_table = get_efi_config_table(sys_table_arg,
- LINUX_EFI_TPM_FINAL_LOG_GUID);
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
@@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
log_tbl->version = version;
memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
- status = efi_call_early(install_configuration_table,
- &linux_eventlog_guid, log_tbl);
+ status = efi_bs_call(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
if (status != EFI_SUCCESS)
goto err_free;
return;
err_free:
- efi_call_early(free_pool, log_tbl);
+ efi_bs_call(free_pool, log_tbl);
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 38b686c67b17..2ff1883dc788 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
return PFN_PHYS(page_to_pfn(p));
}
+void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+{
+ if (flags & EFI_MEMMAP_MEMBLOCK) {
+ if (slab_is_available())
+ memblock_free_late(phys, size);
+ else
+ memblock_free(phys, size);
+ } else if (flags & EFI_MEMMAP_SLAB) {
+ struct page *p = pfn_to_page(PHYS_PFN(phys));
+ unsigned int order = get_order(size);
+
+ free_pages((unsigned long) page_address(p), order);
+ }
+}
+
+static void __init efi_memmap_free(void)
+{
+ __efi_memmap_free(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map,
+ efi.memmap.flags);
+}
+
/**
* efi_memmap_alloc - Allocate memory for the EFI memory map
* @num_entries: Number of entries in the allocated map.
+ * @data: EFI memmap installation parameters
*
* Depending on whether mm_init() has already been invoked or not,
* either memblock or "normal" page allocation is used.
@@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
* Returns the physical address of the allocated memory map on
* success, zero on failure.
*/
-phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data)
{
- unsigned long size = num_entries * efi.memmap.desc_size;
-
- if (slab_is_available())
- return __efi_memmap_alloc_late(size);
+ /* Expect the allocation parameters to be zero-initialized */
+ WARN_ON(data->phys_map || data->size);
+
+ data->size = num_entries * efi.memmap.desc_size;
+ data->desc_version = efi.memmap.desc_version;
+ data->desc_size = efi.memmap.desc_size;
+ data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+ data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+
+ if (slab_is_available()) {
+ data->flags |= EFI_MEMMAP_SLAB;
+ data->phys_map = __efi_memmap_alloc_late(data->size);
+ } else {
+ data->flags |= EFI_MEMMAP_MEMBLOCK;
+ data->phys_map = __efi_memmap_alloc_early(data->size);
+ }
- return __efi_memmap_alloc_early(size);
+ if (!data->phys_map)
+ return -ENOMEM;
+ return 0;
}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
- * @late: Use early or late mapping function?
*
* This function takes care of figuring out which function to use to
* map the EFI memory map in efi.memmap based on how far into the boot
* we are.
*
- * During bootup @late should be %false since we only have access to
- * the early_memremap*() functions as the vmalloc space isn't setup.
- * Once the kernel is fully booted we can fallback to the more robust
- * memremap*() API.
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't setup. Once the kernel is fully booted we can fallback
+ * to the more robust memremap*() API.
*
* Returns zero on success, a negative error code on failure.
*/
-static int __init
-__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
struct efi_memory_map map;
phys_addr_t phys_map;
@@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
phys_map = data->phys_map;
- if (late)
+ if (data->flags & EFI_MEMMAP_LATE)
map.map = memremap(phys_map, data->size, MEMREMAP_WB);
else
map.map = early_memremap(phys_map, data->size);
@@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
return -ENOMEM;
}
+ /* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
+ efi_memmap_free();
+
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
map.desc_version = data->desc_version;
map.desc_size = data->desc_size;
- map.late = late;
+ map.flags = data->flags;
set_bit(EFI_MEMMAP, &efi.flags);
@@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
/* Cannot go backwards */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
- return __efi_memmap_init(data, false);
+ data->flags = 0;
+ return __efi_memmap_init(data);
}
void __init efi_memmap_unmap(void)
@@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void)
if (!efi_enabled(EFI_MEMMAP))
return;
- if (!efi.memmap.late) {
+ if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
unsigned long size;
size = efi.memmap.desc_size * efi.memmap.nr_map;
@@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
struct efi_memory_map_data data = {
.phys_map = addr,
.size = size,
+ .flags = EFI_MEMMAP_LATE,
};
/* Did we forget to unmap the early EFI memmap? */
WARN_ON(efi.memmap.map);
/* Were we already called? */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
/*
* It makes no sense to allow callers to register different
@@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
data.desc_version = efi.memmap.desc_version;
data.desc_size = efi.memmap.desc_size;
- return __efi_memmap_init(&data, true);
+ return __efi_memmap_init(&data);
}
/**
* efi_memmap_install - Install a new EFI memory map in efi.memmap
- * @addr: Physical address of the memory map
- * @nr_map: Number of entries in the memory map
+ * @data: map allocation parameters (address, size, flags)
*
* Unlike efi_memmap_init_*(), this function does not allow the caller
* to switch from early to late mappings. It simply uses the existing
@@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
*
* Returns zero on success, a negative error code on failure.
*/
-int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+int __init efi_memmap_install(struct efi_memory_map_data *data)
{
- struct efi_memory_map_data data;
-
efi_memmap_unmap();
- data.phys_map = addr;
- data.size = efi.memmap.desc_size * nr_map;
- data.desc_version = efi.memmap.desc_version;
- data.desc_size = efi.memmap.desc_size;
-
- return __efi_memmap_init(&data, efi.memmap.late);
+ return __efi_memmap_init(data);
}
/**
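
The reworked efi_memmap_alloc() above records which allocator provided the buffer (EFI_MEMMAP_MEMBLOCK early in boot, EFI_MEMMAP_SLAB once the page allocator is up) in data->flags, so that __efi_memmap_free() can later return the memory to the matching allocator. A standalone sketch of that record-then-dispatch shape (malloc stands in for both kernel allocators; the flag values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAP_MEMBLOCK (1U << 0)  /* illustrative flags */
    #define MAP_SLAB     (1U << 1)

    struct map_data {
        void *ptr;
        size_t size;
        unsigned int flags;
    };

    /* Allocate and remember which allocator was used. */
    static int map_alloc(struct map_data *d, size_t size, int late)
    {
        d->size = size;
        d->flags = late ? MAP_SLAB : MAP_MEMBLOCK;
        d->ptr = malloc(size);  /* stand-in for either allocator */
        return d->ptr ? 0 : -1;
    }

    /* Free through the allocator recorded at allocation time. */
    static void map_free(struct map_data *d)
    {
        if (d->flags & (MAP_MEMBLOCK | MAP_SLAB))
            free(d->ptr);  /* real code: memblock free vs. page free */
        d->ptr = NULL;
    }

    int main(void)
    {
        struct map_data d = { 0 };

        if (map_alloc(&d, 4096, 1))
            return 1;
        printf("%zu bytes, flags=%#x\n", d.size, d.flags);
        map_free(&d);
        return 0;
    }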
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2ed599236a1c..b585f2ef6dd9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -312,6 +312,12 @@ config GPIO_IXP4XX
IXP4xx series of chips.
If unsure, say N.
+config GPIO_LOGICVC
+ tristate "Xylon LogiCVC GPIO support"
+ depends on MFD_SYSCON && OF
+ help
+ Say yes here to support GPIO functionality of the Xylon LogiCVC
+ programmable logic block.
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
@@ -335,14 +341,6 @@ config GPIO_LPC32XX
Select this option to enable GPIO driver for
NXP LPC32XX devices.
-config GPIO_LYNXPOINT
- tristate "Intel Lynxpoint GPIO support"
- depends on ACPI && X86
- select GPIOLIB_IRQCHIP
- help
- driver for GPIO functionality on Intel Lynxpoint PCH chipset
- Requires ACPI device enumeration code to set up a platform device.
-
config GPIO_MB86S7X
tristate "GPIO support for Fujitsu MB86S7x Platforms"
help
@@ -479,6 +477,15 @@ config GPIO_SAMA5D2_PIOBU
The difference from regular GPIOs is that they
maintain their value during backup/self-refresh.
+config GPIO_SIFIVE
+ bool "SiFive GPIO support"
+ depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+ help
+ Say yes here to support the GPIO device on SiFive SoCs.
+
config GPIO_SIOX
tristate "SIOX GPIO support"
depends on SIOX
@@ -613,6 +620,13 @@ config GPIO_VX855
additional drivers must be enabled in order to use the
functionality of the device.
+config GPIO_WCD934X
+ tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
+ depends on MFD_WCD934X && OF_GPIO
+ help
+ Say yes here to support the GPIO block found on the Qualcomm Technologies
+ Inc WCD9340/WCD9341 Audio Codec.
+
config GPIO_XGENE
bool "APM X-Gene GPIO controller support"
depends on ARM64 && OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 34eb8b2b12dd..76ff2a5feaba 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
@@ -76,7 +77,6 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
-obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
@@ -157,6 +158,7 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76f8c7ff18ff..3a44e6ae52bd 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -10,6 +10,28 @@ approach. This means that GPIO consumers, drivers and machine descriptions
ideally have no use or idea of the global GPIO numberspace that has/was
used in the inception of the GPIO subsystem.
+This numberspace issue is the same reason the irq subsystem has been moving
+away from irq numbers to IRQ descriptors.
+
+The underlying motivation for this is that the GPIO numberspace has become
+unmanageable: machine board files tend to become full of macros trying to
+establish the numberspace at compile-time, making it hard to add any numbers
+in the middle (such as if you missed a pin on a chip) without the numberspace
+breaking.
+
+Machine descriptions such as device tree or ACPI do not have a concept of the
+Linux GPIO number, as those descriptions are external to the Linux kernel
+and treat GPIO lines as abstract entities.
+
+The runtime-assigned GPIO numberspace (what you get if you assign the GPIO
+base as -1 in struct gpio_chip) has also become unpredictable: probe
+ordering and the introduction of -EPROBE_DEFER make the probe order of
+independent GPIO chips essentially arbitrary, so their base numbers are
+handed out on a first-come, first-served basis.
+
+The best way out of the problem is to make the global GPIO numbers
+unimportant by simply not using them. GPIO descriptors deal with this;
+a consumer-side sketch follows this file's diff.
+
Work items:
- Convert all GPIO device drivers to only #include <linux/gpio/driver.h>
@@ -33,7 +55,7 @@ This header and helpers appeared at one point when there was no proper
driver infrastructure for doing simpler MMIO GPIO devices and there was
no core support for parsing device tree GPIOs from the core library with
the [devm_]gpiod_get() calls we have today that will implicitly go into
-the device tree back-end.
+the device tree back-end. It is legacy and should not be used in new code.
Work items:
@@ -59,6 +81,15 @@ Work items:
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+Get rid of <linux/gpio.h>
+
+This legacy header is a one-stop shop for anything GPIO and is closely tied
+to the global GPIO numberspace. The endgame of the above refactorings will
+be the removal of <linux/gpio.h>; from that point on, only the specialized
+headers under <linux/gpio/*.h> will be used. This requires all the above to
+be completed and is expected to take a long time.
+
+
Collect drivers
Collect GPIO drivers from arch/* and other places that should be placed
@@ -109,7 +140,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
int irq; /* from platform etc */
struct my_gpio *g;
- struct gpio_irq_chip *girq
+ struct gpio_irq_chip *girq;
/* Set up the irqchip dynamically */
g->irq.name = "my_gpio_irq";
@@ -137,9 +168,14 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-- Support generic hierarchical GPIO interrupts: these are for the
- non-cascading case where there is one IRQ per GPIO line, there is
- currently no common infrastructure for this.
+- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
+ have been converted to the above infrastructure.
+
+- Add more infrastructure to make it possible to also pass a threaded
+ irqchip in struct gpio_irq_chip.
+
+- Drop gpiochip_irqchip_add_nested() when all the chained irqchips
+ have been converted to the above infrastructure.
Increase integration with pin control
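
To make the descriptor argument in the TODO above concrete: a consumer written against GPIO descriptors looks a line up by device and function name and never touches a global number. A kernel-style sketch (not buildable standalone; the device and the "enable" function name are hypothetical):

    #include <linux/gpio/consumer.h>

    static int demo_probe(struct device *dev)
    {
        struct gpio_desc *enable;

        /* Resolved via DT/ACPI by function name, not by number */
        enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(enable))
            return PTR_ERR(enable);

        gpiod_set_value(enable, 1);  /* drive the line active */
        return 0;
    }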
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9f2e6b04c361..cc4ba71e4fe3 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -266,7 +266,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->mmchip.gc.owner = THIS_MODULE;
altera_gc->mmchip.gc.parent = &pdev->dev;
- altera_gc->mapped_irq = platform_get_irq(pdev, 0);
+ altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
if (altera_gc->mapped_irq < 0)
goto skip_irq;
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 8319812593e3..d16645c1d8d9 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -391,7 +391,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
gpio->irq = rc;
- /* Disable IRQ and clear Interrupt status registers for all SPGIO Pins. */
+ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
bank = &aspeed_sgpio_banks[i];
/* disable irq enable bits */
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index f1037b61f763..879db23d8454 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -978,7 +978,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
/**
- * aspeed_gpio_copro_set_ops - Sets the callbacks used for handhsaking with
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
* the coprocessor for shared GPIO banks
* @ops: The callbacks
* @data: Pointer passed back to the callbacks
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 4122683eb1f9..baee8c3f06ad 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/gpio/driver.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -586,11 +585,18 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->gpio_chip = template_chip;
chip = &kona_gpio->gpio_chip;
- kona_gpio->num_bank = of_irq_count(dev->of_node);
- if (kona_gpio->num_bank == 0) {
+ ret = platform_irq_count(pdev);
+ if (!ret) {
dev_err(dev, "Couldn't determine # GPIO banks\n");
return -ENOENT;
+ } else if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine GPIO banks: (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
}
+ kona_gpio->num_bank = ret;
+
if (kona_gpio->num_bank > GPIO_MAX_BANK_NUM) {
dev_err(dev, "Too many GPIO banks configured (max=%d)\n",
GPIO_MAX_BANK_NUM);
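
The bcm-kona hunk above follows the general calling convention for platform_irq_count(): zero means the device has no interrupts, a negative value is an errno that must be passed up unchanged (notably -EPROBE_DEFER, so the probe can be retried later), and a positive value is the count. A standalone sketch of the caller side (errno values match the kernel's; the counting function is faked):

    #include <stdio.h>

    #define ENOENT       2
    #define EPROBE_DEFER 517  /* kernel-internal errno value */

    /* Stand-in for platform_irq_count(): <0 is an errno, else a count. */
    static int fake_irq_count(int scenario)
    {
        return scenario;
    }

    static int probe(int scenario)
    {
        int ret = fake_irq_count(scenario);

        if (ret == 0)
            return -ENOENT;  /* no IRQs: device is unusable */
        if (ret < 0)
            return ret;      /* pass up, including -EPROBE_DEFER */
        printf("%d IRQ banks\n", ret);
        return 0;
    }

    int main(void)
    {
        int scenarios[] = { 0, -EPROBE_DEFER, 3 };

        for (int i = 0; i < 3; i++)
            printf("scenario %d -> %d\n", scenarios[i],
                   probe(scenarios[i]));
        return 0;
    }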
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index ff19a8ad5663..1d0827e79703 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -64,11 +64,11 @@ static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
return -EINVAL;
- /* Check that on valiue fits it's placeholder */
+ /* Check that on value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
return -EINVAL;
- /* Check that off valiue fits it's placeholder */
+ /* Check that off value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
return -EINVAL;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 08234e64993a..f954359c9544 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
-
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
+ spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
uirq->refcnt++;
@@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
- if (uirq->refcnt == 0)
+ if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
free_irq(uirq->uirq, priv);
+ return;
+ }
}
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
@@ -433,12 +435,9 @@ static int grgpio_probe(struct platform_device *ofdev)
static int grgpio_remove(struct platform_device *ofdev)
{
struct grgpio_priv *priv = platform_get_drvdata(ofdev);
- unsigned long flags;
int i;
int ret = 0;
- spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
if (priv->domain) {
for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
if (priv->uirqs[i].refcnt != 0) {
@@ -454,8 +453,6 @@ static int grgpio_remove(struct platform_device *ofdev)
irq_domain_remove(priv->domain);
out:
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
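
The grgpio fix above enforces the rule that request_irq()/free_irq() may sleep and therefore must not run under a spinlock: the lock is dropped around the call and retaken before shared state is touched again. A standalone sketch of that drop-call-retake shape (a pthread mutex stands in for the spinlock, sleep() for the sleeping call; a real driver must also re-validate state after relocking):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int refcnt;

    static int slow_setup(void)
    {
        sleep(1);  /* stand-in for a call that may sleep */
        return 0;
    }

    static int map_one(void)
    {
        pthread_mutex_lock(&lock);
        if (refcnt == 0) {
            /* Drop the lock across the sleeping call... */
            pthread_mutex_unlock(&lock);
            if (slow_setup())
                return -1;
            /* ...and retake it before touching refcnt. */
            pthread_mutex_lock(&lock);
        }
        refcnt++;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        map_one();
        printf("refcnt=%d\n", refcnt);
        return 0;
    }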
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
new file mode 100644
index 000000000000..015632cf159f
--- /dev/null
+++ b/drivers/gpio/gpio-logicvc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define LOGICVC_CTRL_REG 0x40
+#define LOGICVC_CTRL_GPIO_SHIFT 11
+#define LOGICVC_CTRL_GPIO_BITS 5
+
+#define LOGICVC_POWER_CTRL_REG 0x78
+#define LOGICVC_POWER_CTRL_GPIO_SHIFT 0
+#define LOGICVC_POWER_CTRL_GPIO_BITS 4
+
+struct logicvc_gpio {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+};
+
+static void logicvc_gpio_offset(struct logicvc_gpio *logicvc, unsigned offset,
+ unsigned int *reg, unsigned int *bit)
+{
+ if (offset >= LOGICVC_CTRL_GPIO_BITS) {
+ *reg = LOGICVC_POWER_CTRL_REG;
+
+ /* To the (virtual) power ctrl offset. */
+ offset -= LOGICVC_CTRL_GPIO_BITS;
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_POWER_CTRL_GPIO_SHIFT;
+ } else {
+ *reg = LOGICVC_CTRL_REG;
+
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_CTRL_GPIO_SHIFT;
+ }
+
+ *bit = BIT(offset);
+}
+
+static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit, value;
+ int ret;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ ret = regmap_read(logicvc->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ return !!(value & bit);
+}
+
+static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+}
+
+static int logicvc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* Pins are always configured as output, so just set the value. */
+ logicvc_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
+static struct regmap_config logicvc_gpio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "logicvc-gpio",
+};
+
+static int logicvc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_gpio *logicvc;
+ int ret;
+
+ logicvc = devm_kzalloc(dev, sizeof(*logicvc), GFP_KERNEL);
+ if (!logicvc)
+ return -ENOMEM;
+
+ /* Try to get regmap from parent first. */
+ logicvc->regmap = syscon_node_to_regmap(of_node->parent);
+
+ /* Grab our own regmap if that fails. */
+ if (IS_ERR(logicvc->regmap)) {
+ struct resource res;
+ void __iomem *base;
+
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get resource from address\n");
+ return ret;
+ }
+
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map I/O base\n");
+ return PTR_ERR(base);
+ }
+
+ logicvc_gpio_regmap_config.max_register = resource_size(&res) -
+ logicvc_gpio_regmap_config.reg_stride;
+
+ logicvc->regmap =
+ devm_regmap_init_mmio(dev, base,
+ &logicvc_gpio_regmap_config);
+ if (IS_ERR(logicvc->regmap)) {
+ dev_err(dev, "Failed to create regmap for I/O\n");
+ return PTR_ERR(logicvc->regmap);
+ }
+ }
+
+ logicvc->chip.parent = dev;
+ logicvc->chip.owner = THIS_MODULE;
+ logicvc->chip.label = dev_name(dev);
+ logicvc->chip.base = -1;
+ logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
+ LOGICVC_POWER_CTRL_GPIO_BITS;
+ logicvc->chip.get = logicvc_gpio_get;
+ logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.direction_output = logicvc_gpio_direction_output;
+
+ platform_set_drvdata(pdev, logicvc);
+
+ return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
+}
+
+static const struct of_device_id logicvc_gpio_of_table[] = {
+ {
+ .compatible = "xylon,logicvc-3.02.a-gpio",
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, logicvc_gpio_of_table);
+
+static struct platform_driver logicvc_gpio_driver = {
+ .driver = {
+ .name = "gpio-logicvc",
+ .of_match_table = logicvc_gpio_of_table,
+ },
+ .probe = logicvc_gpio_probe,
+};
+
+module_platform_driver(logicvc_gpio_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("Xylon LogiCVC GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
deleted file mode 100644
index 490ce7bae25e..000000000000
--- a/drivers/gpio/gpio-lynxpoint.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPIO controller driver for Intel Lynxpoint PCH chipset>
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-/* LynxPoint chipset has support for 94 gpio pins */
-
-#define LP_NUM_GPIO 94
-
-/* Bitmapped register offsets */
-#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
-#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
-#define LP_INT_STAT 0x80
-#define LP_INT_ENABLE 0x90
-
-/* Each pin has two 32 bit config registers, starting at 0x100 */
-#define LP_CONFIG1 0x100
-#define LP_CONFIG2 0x104
-
-/* LP_CONFIG1 reg bits */
-#define OUT_LVL_BIT BIT(31)
-#define IN_LVL_BIT BIT(30)
-#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
-#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
-#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
-#define USE_SEL_BIT BIT(0) /* 0: Native, 1: GPIO */
-
-/* LP_CONFIG2 reg bits */
-#define GPINDIS_BIT BIT(2) /* disable input sensing */
-#define GPIWP_BIT (BIT(0) | BIT(1)) /* weak pull options */
-
-struct lp_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- spinlock_t lock;
- unsigned long reg_base;
-};
-
-/*
- * Lynxpoint gpios are controlled through both bitmapped registers and
- * per gpio specific registers. The bitmapped registers are in chunks of
- * 3 x 32bit registers to cover all 94 gpios
- *
- * per gpio specific registers consist of two 32bit registers per gpio
- * (LP_CONFIG1 and LP_CONFIG2), with 94 gpios there's a total of
- * 188 config registers.
- *
- * A simplified view of the register layout look like this:
- *
- * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
- * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
- * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 63-94
- * ...
- * LP_INT_ENABLE[31:0] ...
- * LP_INT_ENABLE[63:31] ...
- * LP_INT_ENABLE[94:64] ...
- * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
- * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
- * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
- * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
- * LP2_CONFIG1 (gpio 2) ...
- * LP2_CONFIG2 (gpio 2) ...
- * ...
- * LP94_CONFIG1 (gpio 94) ...
- * LP94_CONFIG2 (gpio 94) ...
- */
-
-static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
- int reg)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- int reg_offset;
-
- if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
- /* per gpio specific config registers */
- reg_offset = offset * 8;
- else
- /* bitmapped registers */
- reg_offset = (offset / 32) * 4;
-
- return lg->reg_base + reg + reg_offset;
-}
-
-static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
- unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
-
- pm_runtime_get(&lg->pdev->dev); /* should we put if failed */
-
- /* Fail if BIOS reserved pin for ACPI use */
- if (!(inl(acpi_use) & BIT(offset % 32))) {
- dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
- return -EBUSY;
- }
- /* Fail if pin is in alternate function mode (not GPIO mode) */
- if (!(inl(reg) & USE_SEL_BIT))
- return -ENODEV;
-
- /* enable input sensing */
- outl(inl(conf2) & ~GPINDIS_BIT, conf2);
-
-
- return 0;
-}
-
-static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
-
- /* disable input sensing */
- outl(inl(conf2) | GPINDIS_BIT, conf2);
-
- pm_runtime_put(&lg->pdev->dev);
-}
-
-static int lp_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
-
- if (hwirq >= lg->chip.ngpio)
- return -EINVAL;
-
- spin_lock_irqsave(&lg->lock, flags);
- value = inl(reg);
-
- /* set both TRIG_SEL and INV bits to 0 for rising edge */
- if (type & IRQ_TYPE_EDGE_RISING)
- value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
-
- /* TRIG_SEL bit 0, INV bit 1 for falling edge */
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
-
- /* TRIG_SEL bit 1, INV bit 0 for level low */
- if (type & IRQ_TYPE_LEVEL_LOW)
- value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
-
- /* TRIG_SEL bit 1, INV bit 1 for level high */
- if (type & IRQ_TYPE_LEVEL_HIGH)
- value |= TRIG_SEL_BIT | INT_INV_BIT;
-
- outl(value, reg);
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else if (type & IRQ_TYPE_LEVEL_MASK)
- irq_set_handler_locked(d, handle_level_irq);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- return !!(inl(reg) & IN_LVL_BIT);
-}
-
-static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
-
- if (value)
- outl(inl(reg) | OUT_LVL_BIT, reg);
- else
- outl(inl(reg) & ~OUT_LVL_BIT, reg);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- lp_gpio_set(chip, offset, value);
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static void lp_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- unsigned long reg, ena, pending;
- u32 base, pin;
-
- /* check from GPIO controller which pin triggered the interrupt */
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
-
- /* Only interrupts that are enabled */
- pending = inl(reg) & inl(ena);
-
- for_each_set_bit(pin, &pending, 32) {
- unsigned irq;
-
- /* Clear before handling so we don't lose an edge */
- outl(BIT(pin), reg);
-
- irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
- generic_handle_irq(irq);
- }
- }
- chip->irq_eoi(data);
-}
-
-static void lp_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lp_irq_mask(struct irq_data *d)
-{
-}
-
-static void lp_irq_enable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static void lp_irq_disable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static struct irq_chip lp_irqchip = {
- .name = "LP-GPIO",
- .irq_mask = lp_irq_mask,
- .irq_unmask = lp_irq_unmask,
- .irq_enable = lp_irq_enable,
- .irq_disable = lp_irq_disable,
- .irq_set_type = lp_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg;
- unsigned base;
-
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- /* disable gpio pin interrupts */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
- outl(0, reg);
- /* Clear interrupt status register */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- outl(0xffffffff, reg);
- }
-
- return 0;
-}
-
-static int lp_gpio_probe(struct platform_device *pdev)
-{
- struct lp_gpio *lg;
- struct gpio_chip *gc;
- struct resource *io_rc, *irq_rc;
- struct device *dev = &pdev->dev;
- unsigned long reg_len;
- int ret = -ENODEV;
-
- lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
- if (!lg)
- return -ENOMEM;
-
- lg->pdev = pdev;
- platform_set_drvdata(pdev, lg);
-
- io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!io_rc) {
- dev_err(dev, "missing IO resources\n");
- return -EINVAL;
- }
-
- lg->reg_base = io_rc->start;
- reg_len = resource_size(io_rc);
-
- if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
- dev_err(dev, "failed requesting IO region 0x%x\n",
- (unsigned int)lg->reg_base);
- return -EBUSY;
- }
-
- spin_lock_init(&lg->lock);
-
- gc = &lg->chip;
- gc->label = dev_name(dev);
- gc->owner = THIS_MODULE;
- gc->request = lp_gpio_request;
- gc->free = lp_gpio_free;
- gc->direction_input = lp_gpio_direction_input;
- gc->direction_output = lp_gpio_direction_output;
- gc->get = lp_gpio_get;
- gc->set = lp_gpio_set;
- gc->base = -1;
- gc->ngpio = LP_NUM_GPIO;
- gc->can_sleep = false;
- gc->parent = dev;
-
- /* set up interrupts */
- if (irq_rc && irq_rc->start) {
- struct gpio_irq_chip *girq;
-
- girq = &gc->irq;
- girq->chip = &lp_irqchip;
- girq->init_hw = lp_gpio_irq_init_hw;
- girq->parent_handler = lp_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = (unsigned)irq_rc->start;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- }
-
- ret = devm_gpiochip_add_data(dev, gc, lg);
- if (ret) {
- dev_err(dev, "failed adding lp-gpio chip\n");
- return ret;
- }
-
- pm_runtime_enable(dev);
-
- return 0;
-}
-
-static int lp_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_resume(struct device *dev)
-{
- struct lp_gpio *lg = dev_get_drvdata(dev);
- unsigned long reg;
- int i;
-
- /* on some hardware suspend clears input sensing, re-enable it here */
- for (i = 0; i < lg->chip.ngpio; i++) {
- if (gpiochip_is_requested(&lg->chip, i) != NULL) {
- reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
- outl(inl(reg) & ~GPINDIS_BIT, reg);
- }
- }
- return 0;
-}
-
-static const struct dev_pm_ops lp_gpio_pm_ops = {
- .runtime_suspend = lp_gpio_runtime_suspend,
- .runtime_resume = lp_gpio_runtime_resume,
- .resume = lp_gpio_resume,
-};
-
-static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
- { "INT33C7", 0 },
- { "INT3437", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
-
-static int lp_gpio_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver lp_gpio_driver = {
- .probe = lp_gpio_probe,
- .remove = lp_gpio_remove,
- .driver = {
- .name = "lp_gpio",
- .pm = &lp_gpio_pm_ops,
- .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
- },
-};
-
-static int __init lp_gpio_init(void)
-{
- return platform_driver_register(&lp_gpio_driver);
-}
-
-static void __exit lp_gpio_exit(void)
-{
- platform_driver_unregister(&lp_gpio_driver);
-}
-
-subsys_initcall(lp_gpio_init);
-module_exit(lp_gpio_exit);
-
-MODULE_AUTHOR("Mathias Nyman (Intel)");
-MODULE_DESCRIPTION("GPIO interface for Intel Lynxpoint");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 94b8d3ae27bc..7d343bea784a 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO Testing Device Driver
*
@@ -7,18 +7,18 @@
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
+#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
#include "gpiolib.h"
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5ae30de3490a..604dfec353a1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -296,6 +296,7 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
.gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+ .irq_set_type = mpc8xxx_irq_set_type,
};
static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d1d785f983a7..b992321bb852 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -253,8 +253,7 @@ mediatek_gpio_bank_probe(struct device *dev,
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 993bbeb3c006..d2b999c7987f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -46,7 +46,6 @@
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -432,6 +431,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
u32 mask = d->mask;
irq_gc_lock(gc);
+ mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
@@ -1102,7 +1102,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
/* Some gpio controllers do not provide irq support */
- have_irqs = of_irq_count(np) != 0;
+ err = platform_irq_count(pdev);
+ if (err < 0)
+ return err;
+
+ have_irqs = err != 0;
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
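
The mvebu unmask change above writes the edge-cause register before setting the mask bit, so an edge latched while the line was masked cannot fire a stale interrupt the moment it is unmasked. A standalone sketch of the ordering (plain variables stand in for the latched-cause and mask registers; the real hardware uses write-to-clear semantics):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t cause, mask;  /* fake cause/mask registers */

    static void unmask(uint32_t bit)
    {
        cause &= ~bit;  /* clear any edge latched while masked... */
        mask |= bit;    /* ...then let new edges through */
    }

    int main(void)
    {
        cause = 0x4;    /* edge arrived while the line was masked */
        unmask(0x4);
        printf("pending after unmask: %#x\n", cause & mask);
        return 0;
    }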
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 9853547e7276..5638b4e5355f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -764,8 +764,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pca953x_irq_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT |
- IRQF_SHARED,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -855,8 +854,6 @@ out:
return ret;
}
-static const struct of_device_id pca953x_dt_ids[];
-
static int pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index b04c561f3280..4d47b2c41186 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -244,7 +244,6 @@ static struct platform_driver sama5d2_piobu_driver = {
module_platform_driver(sama5d2_piobu_driver);
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAMA5D2 PIOBU controller driver");
MODULE_AUTHOR("Andrei Stefanescu <andrei.stefanescu@microchip.com>");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
new file mode 100644
index 000000000000..147a1bd04515
--- /dev/null
+++ b/drivers/gpio/gpio-sifive.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#define SIFIVE_GPIO_INPUT_VAL 0x00
+#define SIFIVE_GPIO_INPUT_EN 0x04
+#define SIFIVE_GPIO_OUTPUT_EN 0x08
+#define SIFIVE_GPIO_OUTPUT_VAL 0x0C
+#define SIFIVE_GPIO_RISE_IE 0x18
+#define SIFIVE_GPIO_RISE_IP 0x1C
+#define SIFIVE_GPIO_FALL_IE 0x20
+#define SIFIVE_GPIO_FALL_IP 0x24
+#define SIFIVE_GPIO_HIGH_IE 0x28
+#define SIFIVE_GPIO_HIGH_IP 0x2C
+#define SIFIVE_GPIO_LOW_IE 0x30
+#define SIFIVE_GPIO_LOW_IP 0x34
+#define SIFIVE_GPIO_OUTPUT_XOR 0x40
+
+#define SIFIVE_GPIO_MAX 32
+#define SIFIVE_GPIO_IRQ_OFFSET 7
+
+struct sifive_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ struct regmap *regs;
+ u32 irq_state;
+ unsigned int trigger[SIFIVE_GPIO_MAX];
+ unsigned int irq_parent[SIFIVE_GPIO_MAX];
+};
+
+static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
+{
+ unsigned long flags;
+ unsigned int trigger;
+
+ spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_FALLING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
+ spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
+}
+
+static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d);
+
+ if (offset < 0 || offset >= gc->ngpio)
+ return -EINVAL;
+
+ chip->trigger[offset] = trigger;
+ sifive_gpio_set_ie(chip, offset);
+ return 0;
+}
+
+static void sifive_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ irq_chip_enable_parent(d);
+
+ /* Switch to input */
+ gc->direction_input(gc, offset);
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ /* Enable interrupts */
+ assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
+ sifive_gpio_set_ie(chip, offset);
+}
+
+static void sifive_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+
+ assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
+ sifive_gpio_set_ie(chip, offset);
+ irq_chip_disable_parent(d);
+}
+
+static void sifive_gpio_irq_eoi(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ irq_chip_eoi_parent(d);
+}
+
+static struct irq_chip sifive_gpio_irqchip = {
+ .name = "sifive-gpio",
+ .irq_set_type = sifive_gpio_irq_set_type,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_enable = sifive_gpio_irq_enable,
+ .irq_disable = sifive_gpio_irq_disable,
+ .irq_eoi = sifive_gpio_irq_eoi,
+};
+
+static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ *parent_type = IRQ_TYPE_NONE;
+ *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+ return 0;
+}
+
+static const struct regmap_config sifive_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .disable_locking = true,
+};
+
+static int sifive_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *irq_parent;
+ struct irq_domain *parent;
+ struct gpio_irq_chip *girq;
+ struct sifive_gpio *chip;
+ int ret, ngpio;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base)) {
+ dev_err(dev, "failed to allocate device memory\n");
+ return PTR_ERR(chip->base);
+ }
+
+ chip->regs = devm_regmap_init_mmio(dev, chip->base,
+ &sifive_gpio_regmap_config);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ngpio = of_irq_count(node);
+ if (ngpio > SIFIVE_GPIO_MAX) {
+ dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
+ SIFIVE_GPIO_MAX);
+ return -ENXIO;
+ }
+
+ irq_parent = of_irq_find_parent(node);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ ret = bgpio_init(&chip->gc, dev, 4,
+ chip->base + SIFIVE_GPIO_INPUT_VAL,
+ chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ NULL,
+ chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ chip->base + SIFIVE_GPIO_INPUT_EN,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ /* Disable all GPIO interrupts before enabling parent interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
+ chip->irq_state = 0;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = ngpio;
+ chip->gc.label = dev_name(dev);
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ girq = &chip->gc.irq;
+ girq->chip = &sifive_gpio_irqchip;
+ girq->fwnode = of_node_to_fwnode(node);
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
+ platform_set_drvdata(pdev, chip);
+ return gpiochip_add_data(&chip->gc, chip);
+}
+
+static const struct of_device_id sifive_gpio_match[] = {
+ { .compatible = "sifive,gpio0" },
+ { .compatible = "sifive,fu540-c000-gpio" },
+ { },
+};
+
+static struct platform_driver sifive_gpio_driver = {
+ .probe = sifive_gpio_probe,
+ .driver = {
+ .name = "sifive_gpio",
+ .of_match_table = of_match_ptr(sifive_gpio_match),
+ },
+};
+builtin_platform_driver(sifive_gpio_driver)
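With the hierarchical IRQ domain set up above, the driver needs no chained handler of its own; consumers resolve a line's interrupt through gpiolib. A minimal consumer sketch, assuming a device node with a "wakeup" GPIO on this controller (the label and handler are hypothetical, not part of this patch):

struct gpio_desc *gpiod = gpiod_get(dev, "wakeup", GPIOD_IN);
int irq = gpiod_to_irq(gpiod);  /* resolved via the GPIO irqdomain */

if (irq >= 0)
        ret = request_irq(irq, my_isr, IRQF_TRIGGER_RISING,
                          "demo-wakeup", NULL);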
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5e375186f90e..866201cf5f65 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -243,4 +243,3 @@ static struct platform_driver tb10x_gpio_driver = {
module_platform_driver(tb10x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("tb10x gpio.");
-MODULE_VERSION("0.0.1");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6fdfe4c5303e..acb99eff9939 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -96,12 +96,12 @@ struct tegra_gpio_info {
static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
u32 val, u32 reg)
{
- __raw_writel(val, tgi->regs + reg);
+ writel_relaxed(val, tgi->regs + reg);
}
static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
{
- return __raw_readl(tgi->regs + reg);
+ return readl_relaxed(tgi->regs + reg);
}
static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
@@ -416,11 +416,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
static int tegra_gpio_resume(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
-
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -448,17 +445,14 @@ static int tegra_gpio_resume(struct device *dev)
}
}
- local_irq_restore(flags);
return 0;
}
static int tegra_gpio_suspend(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -488,7 +482,7 @@ static int tegra_gpio_suspend(struct device *dev)
GPIO_INT_ENB(tgi, gpio));
}
}
- local_irq_restore(flags);
+
return 0;
}
@@ -497,6 +491,11 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
unsigned int gpio = d->hwirq;
u32 port, bit, mask;
+ int err;
+
+ err = irq_set_irq_wake(bank->irq, enable);
+ if (err)
+ return err;
port = GPIO_PORT(gpio);
bit = GPIO_BIT(gpio);
@@ -507,7 +506,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
else
bank->wake_enb[port] &= ~mask;
- return irq_set_irq_wake(bank->irq, enable);
+ return 0;
}
#endif
@@ -557,7 +556,7 @@ static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
#endif
static const struct dev_pm_ops tegra_gpio_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
static int tegra_gpio_probe(struct platform_device *pdev)
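Two things change in concert here: the wake configuration is propagated to the parent interrupt first, so the local wake_enb bookkeeping is only touched once the parent call has succeeded, and the suspend/resume callbacks move to the noirq phase, where device interrupts are already disabled and the local_irq_save()/restore() pair is redundant. A sketch of the error-ordering pattern, with hypothetical helper names:

static int demo_irq_set_wake(struct irq_data *d, unsigned int enable)
{
        int err;

        err = irq_set_irq_wake(parent_irq, enable);  /* parent first */
        if (err)
                return err;

        demo_update_wake_mask(d, enable);  /* local state only on success */
        return 0;
}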
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 55b43b7ce88d..de241263d4be 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -448,17 +448,24 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
return 0;
}
-static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ struct irq_fwspec *fwspec;
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
fwspec->param[0] = gpio->soc->instance;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
+
+ return fwspec;
}
static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
@@ -621,7 +628,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq->chip = &gpio->intc;
irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
- irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+ irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
irq->handler = handle_simple_irq;
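The renamed callback also changes ownership rules: populate_parent_alloc_arg() now kmalloc()s the parent allocation argument, gpiolib passes it to irq_domain_alloc_irqs_parent() and then kfree()s it, and a NULL return is reported as -ENOMEM. A minimal two-cell implementation looks like the sketch below (it mirrors the gpiolib helper updated later in this patch):

static void *demo_populate_parent_alloc_arg(struct gpio_chip *chip,
                                            unsigned int parent_hwirq,
                                            unsigned int parent_type)
{
        struct irq_fwspec *fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);

        if (!fwspec)
                return NULL;  /* gpiolib reports -ENOMEM */

        fwspec->fwnode = chip->irq.parent_domain->fwnode;
        fwspec->param_count = 2;
        fwspec->param[0] = parent_hwirq;
        fwspec->param[1] = parent_type;
        return fwspec;  /* kfree()d by gpiolib after the parent alloc */
}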
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index d08d86a22b1f..9f66deab46ea 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <asm-generic/msi.h>
#define GPIO_RX_DAT 0x0
@@ -395,12 +396,32 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int *parent_type)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
- *parent = txgpio->base_msi + (2 * child);
+ struct irq_data *irqd;
+ unsigned int irq;
+
+ irq = txgpio->msix_entries[child].vector;
+ irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
+ if (!irqd)
+ return -EINVAL;
+ *parent = irqd_to_hwirq(irqd);
*parent_type = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
+static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ msi_alloc_info_t *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->hwirq = parent_hwirq;
+ return info;
+}
+
static int thunderx_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -515,6 +536,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
girq->parent_domain =
irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -524,9 +546,15 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
- err = irq_domain_push_irq(chip->irq.domain,
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = of_node_to_fwnode(dev->of_node);
+ fwspec.param_count = 2;
+ fwspec.param[0] = i;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+ err = irq_domain_push_irq(girq->domain,
txgpio->msix_entries[i].vector,
- chip);
+ &fwspec);
if (err < 0)
dev_err(dev, "irq_domain_push_irq: %d\n", err);
}
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4ff146ca32fe..3bf397b8dfbc 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -71,7 +71,7 @@ static inline u_int32_t gpio_o_bit(int i)
return 1 << (i + 13);
}
-/* Mapping betwee numeric GPIO ID and the actual GPIO hardware numbering:
+/* Mapping between numeric GPIO ID and the actual GPIO hardware numbering:
* 0..13 GPI 0..13
* 14..26 GPO 0..12
* 27..41 GPIO 0..14
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
new file mode 100644
index 000000000000..74913f2e5697
--- /dev/null
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+
+#define WCD_PIN_MASK(p) BIT(p)
+#define WCD_REG_DIR_CTL_OFFSET 0x42
+#define WCD_REG_VAL_CTL_OFFSET 0x43
+#define WCD934X_NPINS 5
+
+struct wcd_gpio_data {
+ struct regmap *map;
+ struct gpio_chip chip;
+};
+
+static int wcd_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->map, WCD_REG_DIR_CTL_OFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & WCD_PIN_MASK(pin))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ return regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), 0);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
+ int val)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+
+ regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
+
+ return !!(value & WCD_PIN_MASK(pin));
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+{
+ wcd_gpio_direction_output(chip, pin, val);
+}
+
+static int wcd_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wcd_gpio_data *data;
+ struct gpio_chip *chip;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->map = dev_get_regmap(dev->parent, NULL);
+ if (!data->map) {
+ dev_err(dev, "%s: failed to get regmap\n", __func__);
+ return -EINVAL;
+ }
+
+ chip = &data->chip;
+ chip->direction_input = wcd_gpio_direction_input;
+ chip->direction_output = wcd_gpio_direction_output;
+ chip->get_direction = wcd_gpio_get_direction;
+ chip->get = wcd_gpio_get;
+ chip->set = wcd_gpio_set;
+ chip->parent = dev;
+ chip->base = -1;
+ chip->ngpio = WCD934X_NPINS;
+ chip->label = dev_name(dev);
+ chip->of_gpio_n_cells = 2;
+ chip->can_sleep = false;
+
+ return devm_gpiochip_add_data(dev, chip, data);
+}
+
+static const struct of_device_id wcd_gpio_of_match[] = {
+ { .compatible = "qcom,wcd9340-gpio" },
+ { .compatible = "qcom,wcd9341-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcd_gpio_of_match);
+
+static struct platform_driver wcd_gpio_driver = {
+ .driver = {
+ .name = "wcd934x-gpio",
+ .of_match_table = wcd_gpio_of_match,
+ },
+ .probe = wcd_gpio_probe,
+};
+
+module_platform_driver(wcd_gpio_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index b21c2e436b61..ad5489a65d54 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -251,8 +251,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
IRQF_SHARED, chip->gc.label, &chip->gc);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b696e4598a24..1b3f217a35e2 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -132,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
int index)
{
/*
- * Handle MMC "cd-inverted" and "wp-inverted" semantics.
- */
- if (IS_ENABLED(CONFIG_MMC)) {
- /*
- * Active low is the default according to the
- * SDHCI specification and the device tree
- * bindings. However the code in the current
- * kernel was written such that the phandle
- * flags were always respected, and "cd-inverted"
- * would invert the flag from the device phandle.
- */
- if (!strcmp(propname, "cd-gpios")) {
- if (of_property_read_bool(np, "cd-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- if (!strcmp(propname, "wp-gpios")) {
- if (of_property_read_bool(np, "wp-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- }
- /*
* Some GPIO fixed regulator quirks.
* Note that active low is the default.
*/
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index fbf6b1a0a4fa..23e3d335cd54 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -762,10 +762,9 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
parent = &gdev->dev;
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent,
- MKDEV(0, 0),
- chip, gpiochip_groups,
- "gpiochip%d", chip->base);
+ dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip,
+ gpiochip_groups, GPIOCHIP_NAME "%d",
+ chip->base);
if (IS_ERR(dev))
return PTR_ERR(dev);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 78a16e42f222..99ac27a72e28 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
* in the given chip for the specified hardware number.
*/
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- u16 hwnum)
+ unsigned int hwnum)
{
struct gpio_device *gdev = chip->gpiodev;
@@ -232,15 +232,15 @@ int gpiod_get_direction(struct gpio_desc *desc)
return -ENOTSUPP;
ret = chip->get_direction(chip, offset);
- if (ret > 0) {
- /* GPIOF_DIR_IN, or other positive */
+ if (ret < 0)
+ return ret;
+
+ /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */
+ if (ret > 0)
ret = 1;
- clear_bit(FLAG_IS_OUT, &desc->flags);
- }
- if (ret == 0) {
- /* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+
+ assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_get_direction);
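assign_bit(nr, addr, value) is the branchless spelling of the removed set_bit()/clear_bit() pair: it sets the bit when value is true and clears it otherwise. Standalone illustration (not driver code):

unsigned long flags = 0;

assign_bit(FLAG_IS_OUT, &flags, !1);  /* get_direction returned 1 (in): cleared */
assign_bit(FLAG_IS_OUT, &flags, !0);  /* get_direction returned 0 (out): set */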
@@ -492,15 +492,6 @@ static int linehandle_validate_flags(u32 flags)
return 0;
}
-static void linehandle_configure_flag(unsigned long *flagsp,
- u32 bit, bool active)
-{
- if (active)
- set_bit(bit, flagsp);
- else
- clear_bit(bit, flagsp);
-}
-
static long linehandle_set_config(struct linehandle_state *lh,
void __user *ip)
{
@@ -522,22 +513,22 @@ static long linehandle_set_config(struct linehandle_state *lh,
desc = lh->descs[i];
flagsp = &desc->flags;
- linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+ assign_bit(FLAG_ACTIVE_LOW, flagsp,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+ assign_bit(FLAG_OPEN_DRAIN, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+ assign_bit(FLAG_OPEN_SOURCE, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+ assign_bit(FLAG_PULL_UP, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+ assign_bit(FLAG_PULL_DOWN, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+ assign_bit(FLAG_BIAS_DISABLE, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
/*
@@ -686,14 +677,13 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- struct gpio_desc *desc;
+ struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
- if (offset >= gdev->ngpio) {
- ret = -EINVAL;
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
goto out_free_descs;
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
goto out_free_descs;
@@ -1018,8 +1008,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
- if (offset >= gdev->ngpio)
- return -EINVAL;
+ desc = gpiochip_get_desc(gdev->chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
/* Return an error if an unknown flag is set */
if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
@@ -1057,7 +1048,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
goto out_free_label;
@@ -1184,10 +1174,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset >= gdev->ngpio)
- return -EINVAL;
- desc = &gdev->descs[lineinfo.line_offset];
+ desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
if (desc->name) {
strncpy(lineinfo.name, desc->name,
sizeof(lineinfo.name));
@@ -1427,7 +1418,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+ dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (chip->parent && chip->parent->driver)
@@ -1452,7 +1443,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (chip->ngpio > FASTPATH_NGPIO)
chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
- chip->ngpio, FASTPATH_NGPIO);
+ chip->ngpio, FASTPATH_NGPIO);
gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
@@ -1495,11 +1486,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_label;
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
for (i = 0; i < chip->ngpio; i++)
gdev->descs[i].gdev = gdev;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1524,15 +1515,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
- if (!chip->get_direction(chip, i))
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->get_direction(chip, i));
} else {
- if (!chip->direction_input)
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->direction_input);
}
}
@@ -1813,7 +1800,7 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gc: the gpiochip to set the irqchip chain to
* @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
+ * cascaded irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
@@ -1856,29 +1843,6 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
}
/**
- * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip chain to
- * @irqchip: the irqchip to chain to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip.
- */
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler)
-{
- if (gpiochip->irq.threaded) {
- chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
- return;
- }
-
- gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, parent_handler);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
-
-/**
* gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip nested handler to
* @irqchip: the irqchip to nest to the gpiochip
@@ -2003,7 +1967,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
+ void *parent_arg;
unsigned int parent_hwirq;
unsigned int parent_type;
struct gpio_irq_chip *girq = &gc->irq;
@@ -2019,7 +1983,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
@@ -2027,7 +1991,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
chip_err(gc, "can't look up hwirq %lu\n", hwirq);
return ret;
}
- chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+ chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
/*
* We set handle_bad_irq because the .set_type() should
@@ -2042,23 +2006,27 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
NULL, NULL);
irq_set_probe(irq);
- /*
- * Create a IRQ fwspec to send up to the parent irqdomain:
- * specify the hwirq we address on the parent and tie it
- * all together up the chain.
- */
- parent_fwspec.fwnode = d->parent->fwnode;
/* This parent only handles asserted level IRQs */
- girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
- parent_type);
- chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type);
+ if (!parent_arg)
+ return -ENOMEM;
+
+ chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
irq, parent_hwirq);
- ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+ irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);
+ /*
+ * If the parent irqdomain is msi, the interrupts have already
+ * been allocated, so the EEXIST is good.
+ */
+ if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
+ ret = 0;
if (ret)
chip_err(gc,
"failed to allocate parent hwirq %d for hwirq %lu\n",
parent_hwirq, hwirq);
+ kfree(parent_arg);
return ret;
}
@@ -2095,8 +2063,8 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
if (!gc->irq.child_offset_to_irq)
gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
- if (!gc->irq.populate_parent_fwspec)
- gc->irq.populate_parent_fwspec =
+ if (!gc->irq.populate_parent_alloc_arg)
+ gc->irq.populate_parent_alloc_arg =
gpiochip_populate_parent_fwspec_twocell;
gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
@@ -2122,27 +2090,43 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 4;
fwspec->param[0] = 0;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = 0;
fwspec->param[3] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
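A hierarchical driver now wires the callback as below; leaving it NULL selects the two-cell helper by default (the "demo" names are assumptions):

girq->fwnode = of_node_to_fwnode(np);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = demo_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;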
@@ -2998,7 +2982,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
* code on failure.
*/
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+ unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags)
@@ -3050,25 +3035,13 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
enum pin_config_param mode)
{
- unsigned long config;
- unsigned arg;
-
- switch (mode) {
- case PIN_CONFIG_BIAS_DISABLE:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_PULL_UP:
- arg = 1;
- break;
-
- default:
- arg = 0;
- }
+ if (!gc->set_config)
+ return -ENOTSUPP;
- config = PIN_CONF_PACKED(mode, arg);
- return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+ return gc->set_config(gc, offset, mode);
}
static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
@@ -3302,15 +3275,9 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->set || !chip->set_config) {
- gpiod_dbg(desc,
- "%s: missing set() or set_config() operations\n",
- __func__);
- return -ENOTSUPP;
- }
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3334,10 +3301,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
* Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- if (transitory)
- set_bit(FLAG_TRANSITORY, &desc->flags);
- else
- clear_bit(FLAG_TRANSITORY, &desc->flags);
+ assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
chip = desc->gdev->chip;
@@ -3347,7 +3311,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = chip->set_config(chip, gpio, packed);
+ rc = gpio_set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
@@ -3371,6 +3335,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ VALIDATE_DESC_VOID(desc);
+ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+
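This helper is the replacement for the MMC "cd-inverted"/"wp-inverted" quirk removed from gpiolib-of.c above: the inversion moves from gpiolib into the consumer. A sketch of the expected caller side (the field name is assumed):

if (device_property_read_bool(dev, "cd-inverted"))
        gpiod_toggle_active_low(ctx->cd_gpiod);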
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
*
@@ -3793,10 +3768,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
- if (value)
- __set_bit(hwgpio, bits);
- else
- __clear_bit(hwgpio, bits);
+ __assign_bit(hwgpio, bits, value);
count++;
}
i++;
@@ -5113,7 +5085,7 @@ static int __init gpiolib_dev_init(void)
return ret;
}
- ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+ ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
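The ioctl conversions above all lean on gpiochip_get_desc(), which returns ERR_PTR(-EINVAL) for an out-of-range offset, so every caller follows the same short pattern instead of open-coding the ngpio comparison:

struct gpio_desc *desc = gpiochip_get_desc(chip, offset);

if (IS_ERR(desc))
        return PTR_ERR(desc);  /* -EINVAL when offset >= chip->ngpio */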
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index ca9bc1e4803c..3e0aab2945d8 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/cdev.h>
+#define GPIOCHIP_NAME "gpiochip"
+
/**
* struct gpio_device - internal state container for GPIO devices
* @id: numerical ID number for the GPIO chip
@@ -78,7 +80,8 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+ unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 01a793a0cbf7..30a1e3ac21d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
- {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4ef4d31f5231..2f52b7f4d25c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 62d8289abb4e..4619f94f0ac7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 644c72f9c594..e6afe4faeca6 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1201,7 @@ out:
}
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
@@ -1913,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+ switch (pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ return true;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ /* For sst branch device */
+ if (!mcs)
+ return true;
+
+ return false;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
- if (port->pdt == new_pdt)
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /*
- * If the new PDT would also have an i2c bus, don't bother
- * with reregistering it
- */
- if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- new_pdt == DP_PEER_DEVICE_SST_SINK) {
- port->pdt = new_pdt;
- return 0;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mutex_lock(&mgr->lock);
- drm_dp_mst_topology_put_mstb(port->mstb);
- port->mstb = NULL;
- mutex_unlock(&mgr->lock);
- break;
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
}
port->pdt = new_pdt;
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ port->mcs = new_mcs;
- case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
- mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (!mstb) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p", port);
- goto out;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
- mutex_lock(&mgr->lock);
- port->mstb = mstb;
- mstb->mgr = port->mgr;
- mstb->port_parent = port;
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
- mutex_unlock(&mgr->lock);
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- /* And make sure we send a link address for this */
- ret = 1;
- break;
+ /* And make sure we send a link address for this */
+ ret = 1;
+ }
}
out:
@@ -2132,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
goto error;
}
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2198,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = false;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2242,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
- port->mcs = port_msg->mcs;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
@@ -2269,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
}
}
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
@@ -2283,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
- if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
send_link_addr = true;
if (port->connector)
@@ -2318,8 +2339,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, ret;
+ int old_ddps, old_input, ret, i;
u8 new_pdt;
+ bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2349,8 +2371,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
old_ddps = port->ddps;
+ old_input = port->input;
port->input = conn_stat->input_port;
- port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2363,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
@@ -2373,6 +2395,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false;
}
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
+ }
+
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
@@ -2718,9 +2762,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent; it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2760,7 +2806,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq))
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -3678,6 +3725,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
@@ -3687,6 +3735,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
no_msg:
drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
@@ -3896,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
@@ -4497,7 +4550,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -4508,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
if (port->connector)
port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
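The key observation behind drm_dp_mst_is_dp_mst_end_device() is that a branching device that cannot exchange sideband messages (mcs clear) is an SST-only branch and must be treated like an end device. As a comment-style truth table (peer device types per the DP 1.4 spec):

/*
 * DP_PEER_DEVICE_SST_SINK,       any mcs    -> true  (end device)
 * DP_PEER_DEVICE_DP_LEGACY_CONV, any mcs    -> true  (end device)
 * DP_PEER_DEVICE_MST_BRANCHING,  mcs == 0   -> true  (SST-only branch)
 * DP_PEER_DEVICE_MST_BRANCHING,  mcs == 1   -> false (real MST branch)
 */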
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index afaf4bea21cf..9278bcfad1bf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
* Map the GTT and the stolen memory area
*/
if (!resume)
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 7005f8f69c68..0900052fc484 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
- dev_priv->aux_reg = ioremap_nocache(resource_start,
+ dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 2fd4ca91a62d..8dd5a43e5486 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -211,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
- priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3d4f5775a4ba..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
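The width change matters because I915_ENGINE_CLASS_INVALID is -1 in the uAPI: once uabi_class is u16, its invalid value is 0xffff, and the old (u8) cast (0xff) would never compare equal. Illustration:

u16 uabi_class = (u16)I915_ENGINE_CLASS_INVALID;  /* 0xffff */

/* old check: uabi_class == (u8)I915_ENGINE_CLASS_INVALID, i.e. 0xff --
 * never true for a u16 field, so the cast must track the field width. */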
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 4c72d74d6576..0dbb44d30885 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -402,7 +402,7 @@ struct get_pages_work {
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
@@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;
ret = -ENOMEM;
pinned = 0;
@@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ef7bc41ffffa..5b7ff3ccfa8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma)
if (err)
return err;
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma)
vma->obj->mm.dirty = true;
return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
+static int __ring_active(struct intel_ring *ring)
+{
+ int err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ return err;
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_active;
+
+ return 0;
+
+err_active:
+ i915_active_release(&ring->vma->active);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ intel_ring_unpin(ring);
+ i915_active_release(&ring->vma->active);
+}
+
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
@@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active)
__context_unpin_state(ce->state);
intel_timeline_unpin(ce->timeline);
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
intel_context_put(ce);
}
@@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active)
intel_context_get(ce);
- err = intel_ring_pin(ce->ring);
+ err = __ring_active(ce->ring);
if (err)
goto err_put;
@@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active)
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
err_put:
intel_context_put(ce);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 17f1f1441efc..2b446474e010 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -274,8 +274,8 @@ struct intel_engine_cs {
u8 class;
u8 instance;
- u8 uabi_class;
- u8 uabi_instance;
+ u16 uabi_class;
+ u16 uabi_instance;
u32 uabi_capabilities;
u32 context_size;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 68179fb56427..d925a1035c9d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2664,6 +2664,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6239a9adbf14..d6ce57d30958 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
@@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
iter.dma += I915_GTT_PAGE_SIZE;
@@ -2847,7 +2849,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- ggtt->gsm = ioremap_nocache(phys_addr, size);
+ ggtt->gsm = ioremap(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
@@ -3304,7 +3306,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma, *vn;
+ struct i915_vma *vma;
bool flush = false;
int open;
@@ -3319,15 +3321,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
- if (!__i915_vma_unbind(vma))
- continue;
-
clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 6f09aa0be80a..d6d2e6fb8674 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1074,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
- if (!is_igp(i915))
+ if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
- "i915-%s",
+ "i915_%s",
dev_name(i915->drm.dev));
- else
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
pmu->name = "i915";
+ }
if (!pmu->name)
goto err;
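tools/perf treats ':' in an event name as a separator, so a PCI-addressed PMU name has to be sanitized. Effect sketch (the PCI address is hypothetical):

pmu->name = kasprintf(GFP_KERNEL, "i915_%s", "0000:03:00.0");
strreplace((char *)pmu->name, ':', '_');  /* -> "i915_0000_03_00.0" */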
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 35cc69a3a1b9..05364eca20f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -25,6 +25,7 @@
#ifndef __I915_SELFTESTS_RANDOM_H__
#define __I915_SELFTESTS_RANDOM_H__
+#include <linux/math64.h>
#include <linux/random.h>
#include "../i915_selftest.h"
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c84f0a8b3f2c..ac678ace09a3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
size = resource_size(res);
- ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f61364f7c471..88b431a267af 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
/**
@@ -320,7 +357,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}
int panfrost_unstable_ioctl_check(void)
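The scheme introduced above is the heart of this series: a BO keeps a
mutex-protected list of per-file mappings, a lookup takes a reference, and
the last put tears the mapping down. Below is a minimal userspace C sketch
of that pattern, assuming simplified types; mapping_get() and mapping_put()
are illustrative stand-ins, not the kernel functions.

/*
 * Userspace analogue of the panfrost per-file mapping refcount pattern.
 * In the kernel the refcount is a kref; here it is guarded by bo->lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct mapping {
	struct mapping *next;	/* linked into the BO's mapping list */
	void *owner;		/* stand-in for the per-file MMU context */
	int refcount;
};

struct bo {
	pthread_mutex_t lock;
	struct mapping *mappings;
};

/* Find the caller's mapping and take a reference, as
 * panfrost_gem_mapping_get() does with kref_get(). */
static struct mapping *mapping_get(struct bo *bo, void *owner)
{
	struct mapping *m, *found = NULL;

	pthread_mutex_lock(&bo->lock);
	for (m = bo->mappings; m; m = m->next) {
		if (m->owner == owner) {
			m->refcount++;
			found = m;
			break;
		}
	}
	pthread_mutex_unlock(&bo->lock);
	return found;
}

/* Drop a reference; the last put unlinks and frees the mapping,
 * mirroring panfrost_gem_mapping_put() and its release callback. */
static void mapping_put(struct bo *bo, struct mapping *m)
{
	struct mapping **pp;

	if (!m)
		return;
	pthread_mutex_lock(&bo->lock);
	if (--m->refcount) {
		pthread_mutex_unlock(&bo->lock);
		return;
	}
	for (pp = &bo->mappings; *pp; pp = &(*pp)->next) {
		if (*pp == m) {
			*pp = m->next;	/* unlink before freeing */
			break;
		}
	}
	pthread_mutex_unlock(&bo->lock);
	free(m);	/* MMU unmap and drm_mm node removal would go here */
}

The ioctls above follow exactly this shape: look the mapping up, use
mmnode.start for the returned offset, then drop the reference.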
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index fd766b1395fb..17b654e1eb94 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;
if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
return &obj->base.base;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 4b17e7308764..ca1bc9019600 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct sg_table *sgts;
- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
bool noexec :1;
bool is_heap :1;
};
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
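drm_mm_node_to_panfrost_mapping() relies on the drm_mm_node being embedded
inside the mapping struct, so container_of() can recover the enclosing
object from a pointer to the member. A standalone sketch of that idiom
(struct names are simplified, not the kernel definitions):

/* Minimal illustration of the container_of() idiom used by
 * drm_mm_node_to_panfrost_mapping(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mm_node { unsigned long start, size; };

struct gem_mapping {
	int refcount;
	struct mm_node mmnode;	/* embedded member, as in the patch */
};

static struct gem_mapping *node_to_mapping(struct mm_node *node)
{
	return container_of(node, struct gem_mapping, mmnode);
}

int main(void)
{
	struct gem_mapping map = { .refcount = 1, .mmnode = { 0x1000, 16 } };

	/* Recover the mapping from a pointer to its embedded node. */
	printf("%d\n", node_to_mapping(&map.mmnode)->refcount);
	return 0;
}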
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 458f0fa68111..f5dd7b29bc95 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
if (!mutex_trylock(&shmem->pages_lock))
return false;
- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
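panfrost_gem_purge() deliberately uses mutex_trylock(): a shrinker runs in
memory-reclaim context and must not sleep on a lock that the allocating
path may already hold. A hedged userspace sketch of the same pattern,
with illustrative types and names:

/* Trylock-under-reclaim: if the lock is contended, skip this object
 * rather than risk a reclaim deadlock. */
#include <pthread.h>
#include <stdbool.h>

struct object {
	pthread_mutex_t pages_lock;
	bool purged;
};

static bool try_purge(struct object *obj)
{
	/* pthread_mutex_trylock() returns nonzero when contended. */
	if (pthread_mutex_trylock(&obj->pages_lock) != 0)
		return false;	/* report "not purged" and move on */

	obj->purged = true;	/* mapping teardown and page release go here */
	pthread_mutex_unlock(&obj->pages_lock);
	return true;
}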
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index d411eb6c8eb9..e364ee00f3d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
- if (job->bos) {
+ if (job->mappings) {
for (i = 0; i < job->bo_count; i++)
+ panfrost_gem_mapping_put(job->mappings[i]);
+ kvfree(job->mappings);
+ }
+
+ if (job->bos) {
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index a3ed64a1f15e..763cfca886a7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
return 0;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
unmapped_len += pgsize;
}
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
@@ -427,7 +433,7 @@ found_mmu:
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;
+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock);
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
}
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
- bo->is_mapped = true;
+ bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return 0;
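Throughout the MMU code, drm_mm nodes hold page-granular start/size values
while the hardware wants byte addresses, hence the recurring << PAGE_SHIFT
and >> PAGE_SHIFT. A small standalone example of that math, assuming
4 KiB pages for illustration:

/* Page-granular address math as used by panfrost_mmu_map/unmap and
 * addr_to_mapping(); PAGE_SHIFT of 12 is an assumption here. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct mm_node { uint64_t start, size; };	/* both counted in pages */

static bool node_contains(const struct mm_node *n, uint64_t addr)
{
	uint64_t offset = addr >> PAGE_SHIFT;	/* byte addr -> page index */

	return offset >= n->start && offset < n->start + n->size;
}

int main(void)
{
	struct mm_node node = { .start = 0x100, .size = 4 };
	uint64_t iova = node.start << PAGE_SHIFT;	/* 0x100000 bytes */
	uint64_t len  = node.size  << PAGE_SHIFT;	/* 0x4000 bytes */

	printf("iova=%#llx len=%#llx hit=%d\n",
	       (unsigned long long)iova, (unsigned long long)len,
	       node_contains(&node, iova + 0x123));
	return 0;
}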
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 7c5b6775ae23..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,12 +4,12 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2c04e858c50a..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -25,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}
/*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 098bc9f40b98..15bf8a207cb0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.size);
else
mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
+ ioremap(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 83c4586665b4..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -95,7 +95,7 @@ struct cdn_dp_device {
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
u8 max_lanes;
- u8 max_rate;
+ unsigned int max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 68289b0b063a..68261c7f8c5f 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
- dvo->regs = devm_ioremap_nocache(dev, res->start,
+ dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8f7bf33815fd..2bb32009d117 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
- hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
- hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 814560ead4e1..64ed102033c8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto release_adapter;
}
- hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5767e93dd1cd..c36a8da373cb 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
- tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0b17ac8a3faa..5e5f82b6a5d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
- vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2a9e67597375..a3612369750f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -256,7 +256,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ priv->mmio = ioremap(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6b0883a1776e..97fd1dafc3e8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -565,7 +565,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
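These hunks are part of a tree-wide rename: ioremap_nocache() had become a
plain alias for ioremap(), which already returns an uncached mapping on
the architectures involved. The TTM code still selects write-combining
where the placement asks for it. A runnable sketch of that selection with
the mapping calls stubbed out (the flag and field names are simplified
stand-ins):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TTM_PL_FLAG_WC (1u << 0)

static void *ioremap_stub(uint64_t phys, size_t size, const char *attr)
{
	printf("map %#llx+%zu as %s\n", (unsigned long long)phys, size, attr);
	return (void *)(uintptr_t)phys;	/* placeholder, not a real mapping */
}

static void *map_mem_reg(uint64_t base, uint64_t offset, size_t size,
			 unsigned int placement)
{
	/* Write-combined for framebuffer-like memory, uncached otherwise;
	 * after this series plain ioremap() is the uncached variant. */
	if (placement & TTM_PL_FLAG_WC)
		return ioremap_stub(base + offset, size, "write-combined");
	return ioremap_stub(base + offset, size, "uncached");
}

int main(void)
{
	map_mem_reg(0xd0000000, 0x1000, 4096, TTM_PL_FLAG_WC);
	map_mem_reg(0xd0000000, 0x2000, 4096, 0);
	return 0;
}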
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 390524143139..1635a9ff4794 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!objs)
return;
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
plane->state->crtc_w,
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index cd9193078525..70e1cb928bf0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_LONG_LENGTH 20
#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2)
+
#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
#define HIDPP_SUB_ID_ROLLER 0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
@@ -135,12 +140,15 @@ struct hidpp_report {
struct hidpp_battery {
u8 feature_index;
u8 solar_feature_index;
+ u8 voltage_feature_index;
struct power_supply_desc desc;
struct power_supply *ps;
char name[64];
int status;
int capacity;
int level;
+ int voltage;
+ int charge_type;
bool online;
};
@@ -183,9 +191,12 @@ struct hidpp_device {
unsigned long quirks;
unsigned long capabilities;
+ u8 supported_reports;
struct hidpp_battery battery;
struct hidpp_scroll_counter vertical_wheel_counter;
+
+ u8 wireless_feature_index;
};
/* HID++ 1.0 error codes */
@@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
struct hidpp_report *message;
int ret, max_count;
+ /* Send as a long report if short reports are not supported. */
+ if (report_id == REPORT_ID_HIDPP_SHORT &&
+ !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+ report_id = REPORT_ID_HIDPP_LONG;
+
switch (report_id) {
case REPORT_ID_HIDPP_SHORT:
max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
@@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question,
(answer->fap.params[0] == question->fap.funcindex_clientid);
}
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+ struct hidpp_report *report)
{
- return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
- (report->rap.sub_id == 0x41);
+ return (hidpp->wireless_feature_index &&
+ (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+ ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+ (report->rap.sub_id == 0x41));
}
/**
@@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp,
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int *level, int *charge_type)
+{
+ int status;
+
+ long charge_sts = (long)data[2];
+
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ switch (data[2] & 0xe0) {
+ case 0x00:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x20:
+ status = POWER_SUPPLY_STATUS_FULL;
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ break;
+ case 0x40:
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0xe0:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ if (test_bit(3, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+ if (test_bit(4, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ }
+
+ if (test_bit(5, &charge_sts)) {
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ }
+
+ *voltage = get_unaligned_be16(data);
+
+ return status;
+}
+
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+ u8 feature_index,
+ int *status, int *voltage,
+ int *level, int *charge_type)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+ NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+ *status = hidpp20_battery_map_status_voltage(params, voltage,
+ level, charge_type);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ int status, voltage, level, charge_type;
+
+ if (hidpp->battery.voltage_feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+ &hidpp->battery.voltage_feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_battery_get_battery_voltage(hidpp,
+ hidpp->battery.voltage_feature_index,
+ &status, &voltage, &level, &charge_type);
+
+ if (ret)
+ return ret;
+
+ hidpp->battery.status = status;
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ int status, voltage, level, charge_type;
+
+ if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+ report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+ return 0;
+
+ status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+ &level, &charge_type);
+
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+ return 0;
+}
+
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
};
static int hidpp_battery_get_property(struct power_supply *psy,
@@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = hidpp->hid_dev->uniq;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* hardware reports voltage in mV; sysfs expects uV */
+ val->intval = hidpp->battery.voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = hidpp->battery.charge_type;
+ break;
default:
ret = -EINVAL;
break;
@@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+ &hidpp->wireless_feature_index,
+ &feature_type);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x2120: Hi-resolution scrolling */
/* -------------------------------------------------------------------------- */
@@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
}
}
- if (unlikely(hidpp_report_is_connect_event(report))) {
+ if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
atomic_set(&hidpp->connected,
!(report->rap.params[0] & (1 << 6)));
if (schedule_work(&hidpp->work) == 0)
@@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
ret = hidpp_solar_battery_event(hidpp, data, size);
if (ret != 0)
return ret;
+ ret = hidpp20_battery_voltage_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp->battery.feature_index = 0xff;
hidpp->battery.solar_feature_index = 0xff;
+ hidpp->battery.voltage_feature_index = 0xff;
if (hidpp->protocol_major >= 2) {
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
- else
- ret = hidpp20_query_battery_info(hidpp);
+ else {
+ ret = hidpp20_query_battery_voltage_info(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info(hidpp);
+ }
if (ret)
return ret;
@@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
if (!battery_props)
return -ENOMEM;
- num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+ num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
battery_props[num_battery_props++] =
@@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ battery_props[num_battery_props++] =
+ POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
battery = &hidpp->battery;
n = atomic_inc_return(&battery_no) - 1;
@@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
else
hidpp10_query_battery_status(hidpp);
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- hidpp20_query_battery_info(hidpp);
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ hidpp20_query_battery_voltage_info(hidpp);
+ else
+ hidpp20_query_battery_info(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
@@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- int id, report_length, supported_reports = 0;
+ int id, report_length;
+ u8 supported_reports = 0;
id = REPORT_ID_HIDPP_SHORT;
report_length = hidpp_get_report_length(hdev, id);
@@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_SHORT_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
}
id = REPORT_ID_HIDPP_LONG;
@@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_LONG_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
}
id = REPORT_ID_HIDPP_VERY_LONG;
@@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
hidpp->very_long_report_length = report_length;
}
@@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
/*
* Make sure the device is HID++ capable, otherwise treat as generic HID
*/
- if (!hidpp_validate_device(hdev)) {
+ hidpp->supported_reports = hidpp_validate_device(hdev);
+
+ if (!hidpp->supported_reports) {
hid_set_drvdata(hdev, NULL);
devm_kfree(&hdev->dev, hidpp);
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
@@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0) {
dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
__func__, ret);
- hid_hw_stop(hdev);
goto hid_hw_open_fail;
}
@@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp_overwrite_name(hdev);
}
+ if (connected && hidpp->protocol_major >= 2) {
+ ret = hidpp_set_wireless_feature_index(hidpp);
+ if (ret == -ENOENT)
+ hidpp->wireless_feature_index = 0;
+ else if (ret)
+ goto hid_hw_init_fail;
+ }
+
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
@@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech MX Master 2S */
LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 3 */
+ LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech Performance MX */
LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
@@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* MX Master mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Master 3 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{}
};
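The new 0x1001 feature handling above decodes a three-byte payload: a
big-endian voltage in mV followed by a status byte whose top three bits
select the charge state. A standalone sketch of that decoding (the enum
values are simplified stand-ins for the kernel's power-supply constants):

#include <stdint.h>
#include <stdio.h>

enum { ST_CHARGING, ST_FULL, ST_DISCHARGING, ST_NOT_CHARGING, ST_UNKNOWN };

static int decode_battery(const uint8_t data[3], unsigned int *voltage_mv)
{
	/* bytes 0-1: voltage in mV, big-endian (get_unaligned_be16) */
	*voltage_mv = ((unsigned int)data[0] << 8) | data[1];

	/* byte 2, bits 7-5: charge status */
	switch (data[2] & 0xe0) {
	case 0x00: return ST_CHARGING;
	case 0x20: return ST_FULL;
	case 0x40: return ST_DISCHARGING;
	case 0xe0: return ST_NOT_CHARGING;
	default:   return ST_UNKNOWN;
	}
}

int main(void)
{
	const uint8_t sample[3] = { 0x0f, 0xa0, 0x20 };	/* 4000 mV, full */
	unsigned int mv;
	int st = decode_battery(sample, &mv);

	printf("status=%d voltage=%umV\n", st, mv);
	return 0;
}

Bits 3-5 of the same status byte add the fast/trickle charge type and the
critical-level flag, exactly as the test_bit() calls above show.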
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 7a75aff78388..2eee5e31c2b7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -451,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+ int len = strlen(hid->uniq) + 1;
+ if (len > _IOC_SIZE(cmd))
+ len = _IOC_SIZE(cmd);
+ ret = copy_to_user(user_arg, hid->uniq, len) ?
+ -EFAULT : len;
+ break;
+ }
}
ret = -ENOTTY;
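The new HIDIOCGRAWUNIQ branch follows the same convention as the existing
string ioctls: copy at most the caller's buffer size and return the number
of bytes copied. A userspace sketch of that capping logic, with
copy_to_user() replaced by memcpy():

#include <stdio.h>
#include <string.h>

static int get_uniq(const char *uniq, char *user_buf, size_t user_len)
{
	size_t len = strlen(uniq) + 1;	/* include the NUL */

	if (len > user_len)
		len = user_len;		/* cap at the ioctl buffer size;
					 * truncation may drop the NUL,
					 * as in the kernel code */
	memcpy(user_buf, uniq, len);
	return (int)len;
}

int main(void)
{
	char buf[8];
	int n = get_uniq("0003:046D:C52B.0001", buf, sizeof(buf));

	printf("copied %d bytes: %.*s\n", n, n, buf);
	return 0;
}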
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 766bd8457346..296f9098c9e4 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void)
unsigned long flags;
spin_lock_irqsave(&host_ts.lock, flags);
- reftime = hyperv_cs->read(hyperv_cs);
+ reftime = hv_read_reference_counter();
newtime = host_ts.host_time + (reftime - host_ts.ref_time);
ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
spin_unlock_irqrestore(&host_ts.lock, flags);
@@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
*/
spin_lock_irqsave(&host_ts.lock, flags);
- cur_reftime = hyperv_cs->read(hyperv_cs);
+ cur_reftime = hv_read_reference_counter();
host_ts.host_time = hosttime;
host_ts.ref_time = cur_reftime;
@@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
adj_guesttime(timedatap->parenttime,
- hyperv_cs->read(hyperv_cs),
+ hv_read_reference_counter(),
timedatap->flags);
}
}
@@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock;
static int hv_timesync_init(struct hv_util_service *srv)
{
/* TimeSync requires Hyper-V clocksource. */
- if (!hyperv_cs)
+ if (!hv_read_reference_counter)
return -ENODEV;
spin_lock_init(&host_ts.lock);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 23dfe848979a..47ac20aee06f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -164,6 +164,16 @@ config SENSORS_ADM1031
This driver can also be built as a module. If so, the module
will be called adm1031.
+config SENSORS_ADM1177
+ tristate "Analog Devices ADM1177 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Analog Devices ADM1177
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called adm1177.
+
config SENSORS_ADM9240
tristate "Analog Devices ADM9240 and compatibles"
depends on I2C
@@ -385,6 +395,16 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_DRIVETEMP
+ tristate "Hard disk drives with temperature sensors"
+ depends on SCSI && ATA
+ help
+ If you say yes here you get support for the temperature sensor on
+ hard disk drives.
+
+ This driver can also be built as a module. If so, the module
+ will be called drivetemp.
+
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"
depends on I2C
@@ -889,7 +909,7 @@ config SENSORS_MAX197
will be called max197.
config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+ tristate "MAX31722 temperature sensor"
depends on SPI
help
Support for the Maxim Integrated MAX31722/MAX31723 digital
@@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX31730
+ tristate "MAX31730 temperature sensor"
+ depends on I2C
+ help
+ Support for the Maxim Integrated MAX31730 3-Channel Remote
+ Temperature Sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31730.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
@@ -1905,7 +1935,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+ tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
depends on !PPC
select HWMON_VID
help
@@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF
the Core 2 Duo. And also the W83627UHG, which is a stripped down
version of the W83627DHG (as far as hardware monitoring goes.)
- This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
- (also known as W83667HG-I), and NCT6776F.
+ This driver also supports Nuvoton W83667HG and W83667HG-B.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 6db5db9cdc29..613f50987965 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
@@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
new file mode 100644
index 000000000000..d314223a404a
--- /dev/null
+++ b/drivers/hwmon/adm1177.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT BIT(0)
+#define ADM1177_CMD_I_CONT BIT(2)
+#define ADM1177_CMD_VRANGE BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH 2
+
+#define ADM1177_BITS 12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client: pointer to the i2c client
+ * @reg: regulator info for the power supply of the device
+ * @r_sense_uohm: current sense resistor value
+ * @alert_threshold_ua: current limit for shutdown
+ * @vrange_high: internal voltage divider
+ */
+struct adm1177_state {
+ struct i2c_client *client;
+ struct regulator *reg;
+ u32 r_sense_uohm;
+ u32 alert_threshold_ua;
+ bool vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+ return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+ return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+ u32 alert_threshold_ua)
+{
+ u64 val;
+ int ret;
+
+ val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+ val = div_u64(val, 105840000U);
+ val = div_u64(val, 1000U);
+ if (val > 0xFF)
+ val = 0xFF;
+
+ ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+ val);
+ if (ret)
+ return ret;
+
+ st->alert_threshold_ua = alert_threshold_ua;
+ return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+ u8 data[3];
+ long dummy;
+ int ret;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[1] << 4) | (data[2] & 0xF);
+ /*
+ * convert to milliamperes:
+ * ((105.84 mV / 4096) * raw) / sense resistor; the constant
+ * is held in nV and the resistor in uohm, so the quotient
+ * comes out in mA
+ */
+ *val = div_u64((105840000ull * dummy),
+ 4096 * st->r_sense_uohm);
+ return 0;
+ case hwmon_curr_max_alarm:
+ *val = st->alert_threshold_ua;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_in:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[0] << 4) | (data[2] >> 4);
+ /*
+ * convert to millivolts based on resistor division
+ * (V_fullscale / 4096) * raw
+ */
+ if (st->vrange_high)
+ dummy *= 26350;
+ else
+ dummy *= 6650;
+
+ *val = DIV_ROUND_CLOSEST(dummy, 4096);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_max_alarm:
+ adm1177_write_alert_thr(st, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adm1177_state *st = data;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ if (st->r_sense_uohm)
+ return 0444;
+ return 0;
+ case hwmon_curr_max_alarm:
+ if (st->r_sense_uohm)
+ return 0644;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+ .is_visible = adm1177_is_visible,
+ .read = adm1177_read,
+ .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+ .ops = &adm1177_hwmon_ops,
+ .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+ struct adm1177_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct adm1177_state *st;
+ u32 alert_threshold_ua;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->client = client;
+
+ st->reg = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+ st);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &st->r_sense_uohm))
+ st->r_sense_uohm = 0;
+ if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+ &alert_threshold_ua)) {
+ if (st->r_sense_uohm)
+ /*
+ * set maximum default value from datasheet based on
+ * shunt-resistor
+ */
+ alert_threshold_ua = div_u64(105840000000,
+ st->r_sense_uohm);
+ else
+ alert_threshold_ua = 0;
+ }
+ st->vrange_high = device_property_read_bool(dev,
+ "adi,vrange-high-enable");
+ if (alert_threshold_ua && st->r_sense_uohm)
+ adm1177_write_alert_thr(st, alert_threshold_ua);
+
+ ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+ ADM1177_CMD_I_CONT |
+ (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+ if (ret)
+ return ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, client->name, st,
+ &adm1177_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+ {"adm1177", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+ { .compatible = "adi,adm1177" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adm1177",
+ .of_match_table = adm1177_dt_ids,
+ },
+ .probe = adm1177_probe,
+ .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
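The conversions in adm1177_read() are easy to check by hand: current is
(105.84 mV / 4096) * raw / Rsense, computed in nanovolts over micro-ohms
so the quotient lands in milliamperes, and voltage scales raw against a
26.35 V or 6.65 V full scale depending on VRANGE. A worked example
(sample raw values and the 50 mOhm shunt are made up):

#include <stdint.h>
#include <stdio.h>

static long adm1177_current_ma(unsigned int raw, uint32_t r_sense_uohm)
{
	/* 105.84 mV = 105840000 nV; nV / uohm = mA */
	return (long)((105840000ull * raw) / (4096ull * r_sense_uohm));
}

static long adm1177_voltage_mv(unsigned int raw, int vrange_high)
{
	uint64_t full_scale = vrange_high ? 26350 : 6650;	/* mV */

	return (long)((full_scale * raw + 2048) / 4096);	/* nearest */
}

int main(void)
{
	/* raw current 2048 across a 50 mOhm (50000 uOhm) shunt */
	printf("%ld mA\n", adm1177_current_ma(2048, 50000));	/* 1058 mA */
	printf("%ld mV\n", adm1177_voltage_mv(2048, 0));	/* 3325 mV */
	return 0;
}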
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6c64d50c9aae..01c2eeb02aa9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;
if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
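The adt7475 change above swaps truncating division for
DIV_ROUND_CLOSEST(), which matters near step boundaries. A quick
demonstration (the macro here is the positive-operand form of the
kernel's helper):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	long volt = 2;	/* mV: rounds up instead of collapsing to zero */
	printf("%ld vs %ld\n", (volt * 1024) / 2250,
	       DIV_ROUND_CLOSEST(volt * 1024, 2250));	/* 0 vs 1 */

	volt = 2249;	/* mV: just under full scale */
	printf("%ld vs %ld\n", (volt * 1024) / 2250,
	       DIV_ROUND_CLOSEST(volt * 1024, 2250));	/* 1023 vs 1024 */
	return 0;
}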
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
new file mode 100644
index 000000000000..370d0c74eb01
--- /dev/null
+++ b/drivers/hwmon/drivetemp.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ * (C) 2018 Linus Walleij
+ *
+ * hwmon: Driver for SCSI/ATA temperature sensors
+ * by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 231 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer
+ * Difference or to set a minimum threshold which corresponds to a
+ * Airflow maximum temperature. This also follows the convention of
+ * Temperature 100 being a best-case value and lower values being
+ * undesirable. However, some older drives may instead
+ * report raw Temperature (identical to 0xC2) or
+ * Temperature minus 50 here.
+ * 194 Temperature or Indicates the device temperature, if the appropriate
+ * Temperature sensor is fitted. Lowest byte of the raw value contains
+ * Celsius the exact temperature value (Celsius degrees).
+ * 231 Life Left Indicates the approximate SSD life left, in terms of
+ * (SSDs) or program/erase cycles or available reserved blocks.
+ * Temperature A normalized value of 100 represents a new drive, with
+ * a threshold value at 10 indicating a need for
+ * replacement. A value of 0 may mean that the drive is
+ * operating in read-only mode to allow data recovery.
+ * Previously (pre-2010) occasionally used for Drive
+ * Temperature (more typically reported at 0xC2).
+ *
+ * Common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths
+ * of a degree C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ * Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Following the above definitions, temperatures are reported as follows.
+ * - If SCT Command Transport is supported, it is used to read the
+ * temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ * the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ * the temperature.
+ */
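To make the attribute layout described above concrete, here is a
standalone sketch of the scan the driver performs later in this file:
12-byte entries, the attribute id at byte 2, the first raw byte at byte 7,
with attribute 194 preferred over 190. The sample buffer contents are
made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMART_ATTR_SIZE 12
#define SMART_MAX_ATTRS 30

static bool smart_find_temp(const uint8_t *table, int *temp_c)
{
	bool found = false;
	int i;

	for (i = 0; i < SMART_MAX_ATTRS; i++) {
		const uint8_t *attr = table + i * SMART_ATTR_SIZE;

		if (attr[2] == 190) {		/* airflow temperature */
			*temp_c = attr[7];
			found = true;
		}
		if (attr[2] == 194) {		/* device temperature wins */
			*temp_c = attr[7];
			return true;
		}
	}
	return found;
}

int main(void)
{
	uint8_t table[SMART_MAX_ATTRS * SMART_ATTR_SIZE] = { 0 };

	table[2] = 194;		/* first entry: attribute 194 */
	table[7] = 38;		/* raw temperature: 38 degrees C */

	int temp;
	if (smart_find_temp(table, &temp))
		printf("drive temperature: %d C\n", temp);
	return 0;
}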
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+ struct list_head list; /* list of instantiated devices */
+ struct mutex lock; /* protect data buffer accesses */
+ struct scsi_device *sdev; /* SCSI device */
+ struct device *dev; /* instantiating device */
+ struct device *hwdev; /* hardware monitoring device */
+ u8 smartdata[ATA_SECT_SIZE]; /* local buffer */
+ int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+ bool have_temp_lowest; /* lowest temp in SCT status */
+ bool have_temp_highest; /* highest temp in SCT status */
+ bool have_temp_min; /* have min temp */
+ bool have_temp_max; /* have max temp */
+ bool have_temp_lcrit; /* have lower critical limit */
+ bool have_temp_crit; /* have critical limit */
+ int temp_min; /* min temp */
+ int temp_max; /* max temp */
+ int temp_lcrit; /* lower critical limit */
+ int temp_crit; /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS 30
+#define SMART_TEMP_PROP_190 190
+#define SMART_TEMP_PROP_194 194
+
+#define SCT_STATUS_REQ_ADDR 0xe0
+#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */
+#define SCT_STATUS_VERSION_HIGH 1
+#define SCT_STATUS_TEMP 200
+#define SCT_STATUS_TEMP_LOWEST 201
+#define SCT_STATUS_TEMP_HIGHEST 202
+#define SCT_READ_LOG_ADDR 0xe1
+#define SMART_READ_LOG 0xd5
+#define SMART_WRITE_LOG 0xd6
+
+#define INVALID_TEMP 0x80
+
+#define temp_is_valid(temp) ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp) (((s8)(temp)) * 1000)
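+
+/*
+ * Worked example (illustrative): SCT temperatures are signed
+ * two's-complement degrees C, so temp_from_sct(0x1e) yields 30000
+ * millidegrees and temp_from_sct(0xec) yields -20000, while 0x80 is
+ * reserved as the "invalid" marker tested by temp_is_valid().
+ */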
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+ return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+ return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+ u8 ata_command, u8 feature,
+ u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ int data_dir;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = ATA_16;
+ if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+ scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+ /*
+ * No off.line or cc, write to dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x06;
+ data_dir = DMA_TO_DEVICE;
+ } else {
+ scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+ /*
+ * No off.line or cc, read from dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x0e;
+ data_dir = DMA_FROM_DEVICE;
+ }
+ scsi_cmd[4] = feature;
+ scsi_cmd[6] = 1; /* 1 sector */
+ scsi_cmd[8] = lba_low;
+ scsi_cmd[10] = lba_mid;
+ scsi_cmd[12] = lba_high;
+ scsi_cmd[14] = ata_command;
+
+ return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+ st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+ NULL);
+}
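+
+/*
+ * CDB layout note (illustrative): in the ATA PASS-THROUGH (16) command
+ * built above, byte 1 carries the protocol in bits 4:1 (4 = PIO Data-in,
+ * 5 = PIO Data-out) and byte 2 encodes T_DIR (bit 3), BYT_BLOK (bit 2)
+ * and T_LENGTH = 2 (transfer length in the sector count field), which is
+ * why 0x0e transfers from and 0x06 transfers to the device.
+ */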
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+ u8 select)
+{
+ return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+ ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+ long *temp)
+{
+ u8 *buf = st->smartdata;
+ bool have_temp = false;
+ u8 temp_raw;
+ u8 csum;
+ int err;
+ int i;
+
+ err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+ if (err)
+ return err;
+
+ /* Checksum the read value table */
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum) {
+ dev_dbg(&st->sdev->sdev_gendev,
+ "checksum error reading SMART values\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+ u8 *entry = buf + i * 12;
+ int id = entry[2];
+
+ if (!id)
+ continue;
+
+ if (id == SMART_TEMP_PROP_190) {
+ temp_raw = entry[7];
+ have_temp = true;
+ }
+ if (id == SMART_TEMP_PROP_194) {
+ temp_raw = entry[7];
+ have_temp = true;
+ break;
+ }
+ }
+
+ if (have_temp) {
+ *temp = temp_raw * 1000;
+ return 0;
+ }
+
+ return -ENXIO;
+}
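+
+/*
+ * Layout note (illustrative): the attribute table starts at byte 2 of
+ * the 512-byte SMART sector, so within each 12-byte stride anchored at
+ * buf + i * 12, offset 2 holds the attribute ID (entry byte 0) and
+ * offset 7 holds the first raw value byte (entry byte 5), i.e. the
+ * temperature in degrees C.
+ */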
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+ u8 *buf = st->smartdata;
+ int err;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ return err;
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+ break;
+ case hwmon_temp_lowest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+ break;
+ case hwmon_temp_highest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+ u8 *buf = st->smartdata;
+ struct scsi_vpd *vpd;
+ bool is_ata, is_sata;
+ bool have_sct_data_table;
+ bool have_sct_temp;
+ bool have_smart;
+ bool have_sct;
+ u16 *ata_id;
+ u16 version;
+ long temp;
+ int err;
+
+ /* SCSI-ATA Translation present? */
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+
+ /*
+ * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+ * VPD and that the drive implements the SATA protocol.
+ */
+ if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+ vpd->data[36] != 0x34) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
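+ /*
+ * Note (illustrative): byte 36 of VPD page 89h is the first byte of
+ * the ATA device signature, and 0x34 is the Register - Device to
+ * Host FIS type, so the check above accepts SATA transports only;
+ * byte 56 holds the command code that produced the identify data.
+ */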
+ ata_id = (u16 *)&vpd->data[60];
+ is_ata = ata_id_is_ata(ata_id);
+ is_sata = ata_id_is_sata(ata_id);
+ have_sct = ata_id_sct_supported(ata_id);
+ have_sct_data_table = ata_id_sct_data_tables(ata_id);
+ have_smart = ata_id_smart_supported(ata_id) &&
+ ata_id_smart_enabled(ata_id);
+
+ rcu_read_unlock();
+
+ /* bail out if this is not a SATA device */
+ if (!is_ata || !is_sata)
+ return -ENODEV;
+ if (!have_sct)
+ goto skip_sct;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct;
+
+ version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+ buf[SCT_STATUS_VERSION_LOW];
+ if (version != 2 && version != 3)
+ goto skip_sct;
+
+ have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+ if (!have_sct_temp)
+ goto skip_sct;
+
+ st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+ st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+ if (!have_sct_data_table)
+ goto skip_sct;
+
+ /* Request and read temperature history table */
+ memset(buf, '\0', sizeof(st->smartdata));
+ buf[0] = 5; /* data table command */
+ buf[2] = 1; /* read table */
+ buf[4] = 2; /* temperature history table */
+
+ err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ /*
+ * Temperature limits per AT Attachment 8 -
+ * ATA/ATAPI Command Set (ATA8-ACS)
+ */
+ st->have_temp_max = temp_is_valid(buf[6]);
+ st->have_temp_crit = temp_is_valid(buf[7]);
+ st->have_temp_min = temp_is_valid(buf[8]);
+ st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+ st->temp_max = temp_from_sct(buf[6]);
+ st->temp_crit = temp_from_sct(buf[7]);
+ st->temp_min = temp_from_sct(buf[8]);
+ st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+ if (have_sct_temp) {
+ st->get_temp = drivetemp_get_scttemp;
+ return 0;
+ }
+skip_sct:
+ if (!have_smart)
+ return -ENODEV;
+ st->get_temp = drivetemp_get_smarttemp;
+ return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+
+ /* Bail out immediately if there is no inquiry data */
+ if (!sdev->inquiry || sdev->inquiry_len < 16)
+ return -ENODEV;
+
+ /* Disk device? */
+ if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+ return -ENODEV;
+
+ return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct drivetemp_data *st = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ mutex_lock(&st->lock);
+ err = st->get_temp(st, attr, val);
+ mutex_unlock(&st->lock);
+ break;
+ case hwmon_temp_lcrit:
+ *val = st->temp_lcrit;
+ break;
+ case hwmon_temp_min:
+ *val = st->temp_min;
+ break;
+ case hwmon_temp_max:
+ *val = st->temp_max;
+ break;
+ case hwmon_temp_crit:
+ *val = st->temp_crit;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct drivetemp_data *st = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_lowest:
+ if (st->have_temp_lowest)
+ return 0444;
+ break;
+ case hwmon_temp_highest:
+ if (st->have_temp_highest)
+ return 0444;
+ break;
+ case hwmon_temp_min:
+ if (st->have_temp_min)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ if (st->have_temp_max)
+ return 0444;
+ break;
+ case hwmon_temp_lcrit:
+ if (st->have_temp_lcrit)
+ return 0444;
+ break;
+ case hwmon_temp_crit:
+ if (st->have_temp_crit)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_LCRIT | HWMON_T_CRIT),
+ NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+ .is_visible = drivetemp_is_visible,
+ .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+ .ops = &drivetemp_ops,
+ .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev->parent);
+ struct drivetemp_data *st;
+ int err;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->sdev = sdev;
+ st->dev = dev;
+ mutex_init(&st->lock);
+
+ if (drivetemp_identify(st)) {
+ err = -ENODEV;
+ goto abort;
+ }
+
+ st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+ st, &drivetemp_chip_info,
+ NULL);
+ if (IS_ERR(st->hwdev)) {
+ err = PTR_ERR(st->hwdev);
+ goto abort;
+ }
+
+ list_add(&st->list, &drivetemp_devlist);
+ return 0;
+
+abort:
+ kfree(st);
+ return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+ struct drivetemp_data *st, *tmp;
+
+ list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+ if (st->dev == dev) {
+ list_del(&st->list);
+ hwmon_device_unregister(st->hwdev);
+ kfree(st);
+ break;
+ }
+ }
+}
+
+static struct class_interface drivetemp_interface = {
+ .add_dev = drivetemp_add,
+ .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+ return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+ scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1f3b30b085b9..6a30fb453f7a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
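+
+/*
+ * Note (illustrative): each pointer in attrs[] is the embedded ->attr of
+ * a struct hwmon_device_attribute, so the two container_of() steps above
+ * walk back to the allocation made in hwmon_genattr() before freeing it.
+ */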
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}
static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
if (!tdata)
return -ENOMEM;
- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
- if (type == hwmon_in)
+ if (type == hwmon_in || type == hwmon_intrusion)
return 0;
return 1;
}
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
(type == hwmon_fan && attr == hwmon_fan_label);
}
-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);
@@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = {
};
static const char * const hwmon_temp_attr_templates[] = {
+ [hwmon_temp_enable] = "temp%d_enable",
[hwmon_temp_input] = "temp%d_input",
[hwmon_temp_type] = "temp%d_type",
[hwmon_temp_lcrit] = "temp%d_lcrit",
@@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = {
};
static const char * const hwmon_in_attr_templates[] = {
+ [hwmon_in_enable] = "in%d_enable",
[hwmon_in_input] = "in%d_input",
[hwmon_in_min] = "in%d_min",
[hwmon_in_max] = "in%d_max",
@@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
- [hwmon_in_enable] = "in%d_enable",
};
static const char * const hwmon_curr_attr_templates[] = {
+ [hwmon_curr_enable] = "curr%d_enable",
[hwmon_curr_input] = "curr%d_input",
[hwmon_curr_min] = "curr%d_min",
[hwmon_curr_max] = "curr%d_max",
@@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = {
};
static const char * const hwmon_power_attr_templates[] = {
+ [hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
[hwmon_power_average_interval_max] = "power%d_interval_max",
@@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = {
};
static const char * const hwmon_energy_attr_templates[] = {
+ [hwmon_energy_enable] = "energy%d_enable",
[hwmon_energy_input] = "energy%d_input",
[hwmon_energy_label] = "energy%d_label",
};
static const char * const hwmon_humidity_attr_templates[] = {
+ [hwmon_humidity_enable] = "humidity%d_enable",
[hwmon_humidity_input] = "humidity%d_input",
[hwmon_humidity_label] = "humidity%d_label",
[hwmon_humidity_min] = "humidity%d_min",
@@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = {
};
static const char * const hwmon_fan_attr_templates[] = {
+ [hwmon_fan_enable] = "fan%d_enable",
[hwmon_fan_input] = "fan%d_input",
[hwmon_fan_label] = "fan%d_label",
[hwmon_fan_min] = "fan%d_min",
@@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = {
[hwmon_pwm_freq] = "pwm%d_freq",
};
+static const char * const hwmon_intrusion_attr_templates[] = {
+ [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+ [hwmon_intrusion_beep] = "intrusion%d_beep",
+};
+
static const char * const *__templates[] = {
[hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
@@ -468,6 +495,7 @@ static const char * const *__templates[] = {
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
+ [hwmon_intrusion] = hwmon_intrusion_attr_templates,
};
static const int __templates_size[] = {
@@ -480,6 +508,7 @@ static const int __templates_size[] = {
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+ [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
@@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev,
}
static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}
@@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(dev,
- hwdev, j);
+ err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
/*
@@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index b09c39abd3a8..eeac4b04df27 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev)
goto err;
}
- data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len);
+ data->amb_mmio = ioremap(data->amb_base, data->amb_len);
if (!data->amb_mmio) {
res = -EBUSY;
goto err_map_failed;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 5c1dddde193c..e39354ffe973 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,13 +1,29 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ * processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ * convert raw register values is from https://github.com/ocerman/zenpower.
+ * The information is not confirmed from chip datasheets, but experiments
+ * suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ * https://github.com/ocerman/zenpower, and not confirmed from chip
+ * datasheets. Current calibration is board specific and not typically
+ * shared by board vendors. For this reason, current values are
+ * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ * current. Reported values can be adjusted using the sensors configuration
+ * file.
*/
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#endif
/* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_MASK GENMASK(31, 28)
#define CPUID_PKGTYPE_F 0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH 0x094
-#define DDR3_MODE 0x00000100
+#define DDR3_MODE BIT(8)
/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL 0x64
-#define HTC_ENABLE 0x00000001
+#define HTC_ENABLE BIT(0)
#define REG_REPORTED_TEMPERATURE 0xa4
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
-#define NB_CAP_HTC 0x00000400
+#define NB_CAP_HTC BIT(10)
/*
* For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
@@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0)
+
+#define F17H_M01H_SVI 0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT 21
+#define CUR_TEMP_RANGE_SEL_MASK BIT(19)
+
+#define CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define CFACTOR_ISOC 250000 /* 0.25A / LSB */
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -67,6 +97,10 @@ struct k10temp_data {
int temp_offset;
u32 temp_adjust_mask;
bool show_tdie;
+ u32 show_tccd;
+ u32 svi_addr[2];
+ bool show_current;
+ int cfactor[2];
};
struct tctl_offset {
@@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
+static bool is_threadripper(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
{
- unsigned int temp;
u32 regval;
+ long temp;
data->read_tempreg(data->pdev, &regval);
- temp = (regval >> 21) * 125;
+ temp = (regval >> CUR_TEMP_SHIFT) * 125;
if (regval & data->temp_adjust_mask)
temp -= 49000;
return temp;
}
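+
+/*
+ * Worked example (illustrative): CUR_TEMP occupies bits 31:21 of the
+ * reported temperature register in 0.125 degC units, so a raw field of
+ * 376 yields 376 * 125 = 47000 millidegrees; when the range-select bit
+ * is set, 49000 is subtracted to undo the shifted reporting range.
+ */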
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+ "Tdie",
+ "Tctl",
+ "Tccd1",
+ "Tccd2",
+ "Tccd3",
+ "Tccd4",
+ "Tccd5",
+ "Tccd6",
+ "Tccd7",
+ "Tccd8",
+};
- if (temp > data->temp_offset)
- temp -= data->temp_offset;
- else
- temp = 0;
+static const char *k10temp_in_label[] = {
+ "Vcore",
+ "Vsoc",
+};
- return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+ "Icore",
+ "Isoc",
+};
-static ssize_t temp2_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
-
- return sprintf(buf, "%u\n", temp);
+ switch (type) {
+ case hwmon_temp:
+ *str = k10temp_temp_label[channel];
+ break;
+ case hwmon_in:
+ *str = k10temp_in_label[channel];
+ break;
+ case hwmon_curr:
+ *str = k10temp_curr_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
- return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+ switch (attr) {
+ case hwmon_curr_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+ (regval & 0xff),
+ 1000);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp1_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- return sprintf(buf, "%d\n", 70 * 1000);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_in_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ regval = (regval >> 16) & 0xff;
+ *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
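+
+/*
+ * Worked example (illustrative): the SVI telemetry word packs the VID in
+ * bits 23:16 and the current monitor value in bits 7:0. A VID of 0x40
+ * decodes above to 1550 - 64 * 6.25 = 1150 mV, and with CFACTOR_ICORE a
+ * current readout of 10 decodes to 10000 mA. Both scalings follow the
+ * zenpower project and are not confirmed by datasheets, as noted at the
+ * top of this file.
+ */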
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
u32 regval;
- int value;
- data->read_htcreg(data->pdev, &regval);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
- value -= ((regval >> 24) & 0xf) * 500;
- return sprintf(buf, "%d\n", value);
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie */
+ *val = get_raw_temp(data) - data->temp_offset;
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 1: /* Tctl */
+ *val = get_raw_temp(data);
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ F17H_M70H_CCD_TEMP(channel - 2), &regval);
+ *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = 70 * 1000;
+ break;
+ case hwmon_temp_crit:
+ data->read_htcreg(data->pdev, &regval);
+ *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+ break;
+ case hwmon_temp_crit_hyst:
+ data->read_htcreg(data->pdev, &regval);
+ *val = (((regval >> 16) & 0x7f)
+ - ((regval >> 24) & 0xf)) * 500 + 52000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return k10temp_read_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return k10temp_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return k10temp_read_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct k10temp_data *data = dev_get_drvdata(dev);
+ const struct k10temp_data *data = _data;
struct pci_dev *pdev = data->pdev;
u32 reg;
- switch (index) {
- case 0 ... 1: /* temp1_input, temp1_max */
- default:
- break;
- case 2 ... 3: /* temp1_crit, temp1_crit_hyst */
- if (!data->read_htcreg)
- return 0;
-
- pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- &reg);
- if (!(reg & NB_CAP_HTC))
- return 0;
-
- data->read_htcreg(data->pdev, &reg);
- if (!(reg & HTC_ENABLE))
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie, or Tctl if we don't show it */
+ break;
+ case 1: /* Tctl */
+ if (!data->show_tdie)
+ return 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case hwmon_temp_max:
+ if (channel || data->show_tdie)
+ return 0;
+ break;
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel || !data->read_htcreg)
+ return 0;
+
+ pci_read_config_dword(pdev,
+ REG_NORTHBRIDGE_CAPABILITIES,
+ &reg);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, &reg);
+ if (!(reg & HTC_ENABLE))
+ return 0;
+ break;
+ case hwmon_temp_label:
+ /* No labels if we don't show the die temperature */
+ if (!data->show_tdie)
+ return 0;
+ switch (channel) {
+ case 0: /* Tdie */
+ case 1: /* Tctl */
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
return 0;
+ }
break;
- case 4 ... 6: /* temp1_label, temp2_input, temp2_label */
- if (!data->show_tdie)
+ case hwmon_in:
+ case hwmon_curr:
+ if (!data->show_current)
return 0;
break;
+ default:
+ return 0;
}
- return attr->mode;
+ return 0444;
}
-static struct attribute *k10temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_max.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &dev_attr_temp2_input.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group k10temp_group = {
- .attrs = k10temp_attrs,
- .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
static bool has_erratum_319(struct pci_dev *pdev)
{
u32 pkg_type, reg_dram_cfg;
@@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev)
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
-static int k10temp_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+ u32 addr, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (!(i & 3))
+ seq_printf(s, "0x%06x: ", addr + i * 4);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
+ seq_printf(s, "%08x ", reg);
+ if ((i & 3) == 3)
+ seq_puts(s, "\n");
+ }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+ debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+ struct dentry *debugfs;
+ char name[32];
+
+ /* Only show debugfs data for Family 17h/18h CPUs */
+ if (!data->show_tdie)
+ return;
+
+ scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+ debugfs = debugfs_create_dir(name, NULL);
+ if (debugfs) {
+ debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+ debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+ devm_add_action_or_reset(&data->pdev->dev,
+ k10temp_debugfs_cleanup, debugfs);
+ }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+ .is_visible = k10temp_is_visible,
+ .read = k10temp_read,
+ .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+ .ops = &k10temp_hwmon_ops,
+ .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ struct k10temp_data *data, int limit)
+{
+ u32 regval;
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M70H_CCD_TEMP(i), &regval);
+ if (regval & F17H_M70H_CCD_TEMP_VALID)
+ data->show_tccd |= BIT(i);
+ }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
@@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev,
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = 0x80000;
+ data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_f17;
data->show_tdie = true;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x1: /* Zen */
+ case 0x8: /* Zen+ */
+ case 0x11: /* Zen APU */
+ case 0x18: /* Zen+ APU */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ k10temp_get_ccd_support(pdev, data, 4);
+ break;
+ case 0x31: /* Zen2 Threadripper */
+ case 0x71: /* Zen2 */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
} else {
data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
@@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev,
}
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+ &k10temp_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ k10temp_init_debugfs(data);
+
+ return 0;
}
static const struct pci_device_id k10temp_id_table[] = {
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
new file mode 100644
index 000000000000..eb22a34dc36b
--- /dev/null
+++ b/drivers/hwmon/max31730.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP 0x00
+#define MAX31730_REG_CONF 0x13
+#define MAX31730_STOP BIT(7)
+#define MAX31730_EXTRANGE BIT(1)
+#define MAX31730_REG_TEMP_OFFSET 0x16
+#define MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE 0x17
+#define MAX31730_REG_TEMP_MAX 0x20
+#define MAX31730_REG_TEMP_MIN 0x30
+#define MAX31730_REG_STATUS_HIGH 0x32
+#define MAX31730_REG_STATUS_LOW 0x33
+#define MAX31730_REG_CHANNEL_ENABLE 0x35
+#define MAX31730_REG_TEMP_FAULT 0x36
+
+#define MAX31730_REG_MFG_ID 0x50
+#define MAX31730_MFG_ID 0x4d
+#define MAX31730_REG_MFG_REV 0x51
+#define MAX31730_MFG_REV 0x01
+
+#define MAX31730_TEMP_MIN (-128000)
+#define MAX31730_TEMP_MAX 127937
+
+/* Each client has this additional data */
+struct max31730_data {
+ struct i2c_client *client;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 offset_enable;
+ u8 channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+ return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
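+
+/*
+ * Worked example (illustrative): temperature registers keep the 12-bit
+ * reading in the upper bits with an LSB of 62.5 m°C, so a raw word of
+ * 0x1900 becomes (0x190 * 1000) / 16 = 25000, i.e. 25 degrees C.
+ */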
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= MAX31730_EXTRANGE;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+ u8 *confdata, int channel, bool enable)
+{
+ u8 regval = *confdata;
+ int err;
+
+ if (enable)
+ regval |= BIT(channel);
+ else
+ regval &= ~BIT(channel);
+
+ if (regval != *confdata) {
+ err = i2c_smbus_write_byte_data(client, reg, regval);
+ if (err)
+ return err;
+ *confdata = regval;
+ }
+ return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+ &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+ &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int regval, reg, offset;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!(data->channel_enable & BIT(channel)))
+ return -ENODATA;
+ reg = MAX31730_REG_TEMP + (channel * 2);
+ break;
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ *val = !!(data->channel_enable & BIT(channel));
+ return 0;
+ case hwmon_temp_offset:
+ if (!channel)
+ return -EINVAL;
+ if (!(data->offset_enable & BIT(channel))) {
+ *val = 0;
+ return 0;
+ }
+ offset = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET);
+ if (offset < 0)
+ return offset;
+ *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+ return 0;
+ case hwmon_temp_fault:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_FAULT);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_min_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_LOW);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_max_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_HIGH);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ regval = i2c_smbus_read_word_swapped(data->client, reg);
+ if (regval < 0)
+ return regval;
+
+ *val = max31730_reg_to_mc(regval);
+
+ return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int reg, err;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + channel * 2;
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ return max31730_set_channel_enable(data, channel, val);
+ case hwmon_temp_offset:
+ val = clamp_val(val, -14875, 17000) + 14875;
+ val = DIV_ROUND_CLOSEST(val, 125);
+ err = max31730_set_offset_enable(data, channel,
+ val != MAX31730_TEMP_OFFSET_BASELINE);
+ if (err)
+ return err;
+ return i2c_smbus_write_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET, val);
+ default:
+ return -EINVAL;
+ }
+
+ val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
+ val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+ return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_min:
+ return channel ? 0444 : 0644;
+ case hwmon_temp_offset:
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT
+ ),
+ NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+ .is_visible = max31730_is_visible,
+ .read = max31730_read,
+ .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+ .ops = &max31730_hwmon_ops,
+ .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+ struct max31730_data *max31730 = data;
+ struct i2c_client *client = max31730->client;
+
+ i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+ max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct max31730_data *data;
+ int status, err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+
+ /* Cache original configuration and enable status */
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+ if (status < 0)
+ return status;
+ data->channel_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+ if (status < 0)
+ return status;
+ data->offset_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+ if (status < 0)
+ return status;
+ data->orig_conf = status;
+ data->current_conf = status;
+
+ err = max31730_write_config(data,
+ data->channel_enable ? 0 : MAX31730_STOP,
+ data->channel_enable ? MAX31730_STOP : 0);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, data);
+
+ err = devm_add_action_or_reset(dev, max31730_remove, data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max31730_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+ { "max31730", 0, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+ {
+ .compatible = "maxim,max31730",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+ int reg)
+{
+ int regval;
+
+ regval = i2c_smbus_read_byte_data(client, reg + 1);
+ return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+ if (regval != MAX31730_MFG_ID)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+ if (regval != MAX31730_MFG_REV)
+ return -ENODEV;
+
+ /* lower 4 bits of the temperature and limit registers must be 0 */
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+ return -ENODEV;
+
+ for (i = 0; i < 4; i++) {
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+ return -ENODEV;
+ if (max31730_check_reg_temp(client,
+ MAX31730_REG_TEMP_MAX + i * 2))
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31730",
+ .of_match_table = of_match_ptr(max31730_of_match),
+ .pm = &max31730_pm_ops,
+ },
+ .probe = max31730_probe,
+ .id_table = max31730_ids,
+ .detect = max31730_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index f3dd2a17bd42..2e97e56c72c7 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -23,8 +23,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
struct nct7802_data {
struct regmap *regmap;
struct mutex access_lock; /* for multi-byte read and write operations */
+ u8 in_status;
+ struct mutex in_alarm_lock;
};
static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
return err ? : count;
}
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int volt, min, max, ret;
+ unsigned int val;
+
+ mutex_lock(&data->in_alarm_lock);
+
+ /*
+ * The SMI Voltage status register is the only register giving a status
+ * for voltages. A bit is set for each input crossing a threshold, in
+ * both directions, but whether the input is inside or outside its
+ * limits is not reported. Also, this register is cleared on read.
+ * Note: this is not explicitly spelled out in the datasheet, but was
+ * determined by experiment.
+ * To deal with this we use a status cache with one validity bit and
+ * one status bit for each input. Validity is cleared at startup and
+ * each time the register reports a change, and the status is processed
+ * by software based on current input value and limits.
+ */
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+ if (ret < 0)
+ goto abort;
+
+ /* invalidate cached status for all inputs crossing a threshold */
+ data->in_status &= ~((val & 0x0f) << 4);
+
+ /* if cached status for requested input is invalid, update it */
+ if (!(data->in_status & (0x10 << sattr->index))) {
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
+ if (ret < 0)
+ goto abort;
+ volt = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
+ if (ret < 0)
+ goto abort;
+ min = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
+ if (ret < 0)
+ goto abort;
+ max = ret;
+
+ if (volt < min || volt > max)
+ data->in_status |= (1 << sattr->index);
+ else
+ data->in_status &= ~(1 << sattr->index);
+
+ data->in_status |= 0x10 << sattr->index;
+ }
+
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+ mutex_unlock(&data->in_alarm_lock);
+ return ret;
+}
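+
+/*
+ * Illustrative example of the cache layout used above: bits 0-3 of
+ * in_status hold the last computed alarm states and bits 4-7 the
+ * per-input validity flags. If the SMI status read returns 0x04 (the
+ * input with index 2 crossed a limit), validity bit 6 is cleared, so
+ * the next in4_alarm read recomputes the alarm from the live voltage
+ * and its limits before marking the cache valid again.
+ */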
+
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);
mutex_init(&data->access_lock);
+ mutex_init(&data->in_alarm_lock);
ret = nct7802_init_chip(data);
if (ret < 0)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 59859979571d..a9ea06204767 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,8 +20,8 @@ config SENSORS_PMBUS
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
- TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+ TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -145,6 +145,15 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX20730
+ tristate "Maxim MAX20730, MAX20734, MAX20743"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX20730, MAX20734, and MAX20743.
+
+ This driver can also be built as a module. If so, the module will
+ be called max20730.
+
config SENSORS_MAX20751
tristate "Maxim MAX20751"
help
@@ -200,20 +209,20 @@ config SENSORS_TPS40422
be called tps40422.
config SENSORS_TPS53679
- tristate "TI TPS53679"
+ tristate "TI TPS53679, TPS53688"
help
If you say yes here you get hardware monitoring support for TI
- TPS53679.
+ TPS53679 and TPS53688.
This driver can also be built as a module. If so, the module will
be called tps53679.
config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+ tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
- Health Controllers.
+ UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+ and System Health Controllers.
This driver can also be built as a module. If so, the module will
be called ucd9000.
@@ -228,6 +237,15 @@ config SENSORS_UCD9200
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_XDPE122
+ tristate "Infineon XDPE122 family"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ XDPE12254 and XDPE12284 devices.
+
+ This driver can also be built as a module. If so, the module will
+ be called xdpe12284.
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 3f8c1014938b..5feb45806123 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d359b76bcb36..3795fe55b84f 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -20,12 +20,15 @@
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
+#define CFFPS_HEADER_CMD 0x9C
#define CFFPS_SN_CMD 0x9E
+#define CFFPS_MAX_POWER_OUT_CMD 0xA7
#define CFFPS_CCIN_CMD 0xBD
#define CFFPS_FW_CMD 0xFA
#define CFFPS1_FW_NUM_BYTES 4
#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
+#define CFFPS_12VCS_VOUT_CMD 0xDE
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
@@ -44,22 +47,21 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_HEADER,
CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_MAX_POWER_OUT,
CFFPS_DEBUGFS_CCIN,
CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_ON_OFF_CONFIG,
CFFPS_DEBUGFS_NUM_ENTRIES
};
@@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
psu->input_history.byte_count);
}
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
u8 cmd;
int i, rc;
int *idxp = file->private_data;
int idx = *idxp;
struct ibm_cffps *psu = to_psu(idxp, idx);
- char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
pmbus_set_page(psu->client, 0);
@@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
case CFFPS_DEBUGFS_PN:
cmd = CFFPS_PN_CMD;
break;
+ case CFFPS_DEBUGFS_HEADER:
+ cmd = CFFPS_HEADER_CMD;
+ break;
case CFFPS_DEBUGFS_SN:
cmd = CFFPS_SN_CMD;
break;
+ case CFFPS_DEBUGFS_MAX_POWER_OUT:
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+ goto done;
case CFFPS_DEBUGFS_CCIN:
rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
if (rc < 0)
@@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
return -EOPNOTSUPP;
}
goto done;
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ rc = i2c_smbus_read_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 3, "%02x", rc);
+ goto done;
default:
return -EINVAL;
}
@@ -214,9 +235,42 @@ done:
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ u8 data;
+ ssize_t rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ pmbus_set_page(psu->client, 0);
+
+ rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+ if (rc <= 0)
+ return rc;
+
+ rc = i2c_smbus_write_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG, data);
+ if (rc)
+ return rc;
+
+ rc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
static const struct file_operations ibm_cffps_fops = {
.llseek = noop_llseek,
- .read = ibm_cffps_debugfs_op,
+ .read = ibm_cffps_debugfs_read,
+ .write = ibm_cffps_debugfs_write,
.open = simple_open,
};
@@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
if (mfr & CFFPS_MFR_PS_KILL)
rc |= PB_STATUS_OFF;
break;
+ case PMBUS_VIRT_READ_VMON:
+ rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+ break;
default:
rc = -ENODATA;
break;
@@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
rc = devm_led_classdev_register(dev, &psu->led);
if (rc)
dev_warn(dev, "failed to register led class: %d\n", rc);
+ else
+ i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_OFF);
}
static struct pmbus_driver_info ibm_cffps_info[] = {
@@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
- PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
@@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client,
debugfs_create_file("part_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_PN],
&ibm_cffps_fops);
+ debugfs_create_file("header", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+ &ibm_cffps_fops);
debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_SN],
&ibm_cffps_fops);
+ debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+ &ibm_cffps_fops);
debugfs_create_file("ccin", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
&ibm_cffps_fops);
debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FW],
&ibm_cffps_fops);
+ debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+ &ibm_cffps_fops);
return 0;
}
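For reference, the new on_off_config attribute is read back as two hex digits and written as a single raw byte (see the handlers above). A minimal userspace sketch, not part of the patch; the debugfs path is hypothetical (it depends on where ibm_cffps_dir is created on the running system) and the 0x1a value is only illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust to the actual debugfs directory. */
	const char *path = "/sys/kernel/debug/ibm-cffps0/on_off_config";
	char val[3] = { 0 };
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, val, 2) == 2)		/* "%02x" formatted byte */
		printf("ON_OFF_CONFIG = 0x%s\n", val);
	close(fd);

	fd = open(path, O_WRONLY);		/* reopen; write wants pos 0 */
	if (fd < 0)
		return 1;
	write(fd, "\x1a", 1);			/* one raw byte, not hex text */
	close(fd);
	return 0;
}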
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
new file mode 100644
index 000000000000..294e2212f61e
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+ max20730,
+ max20734,
+ max20743
+};
+
+struct max20730_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+ struct mutex lock; /* Used to protect against parallel writes */
+ u16 mfr_devset1;
+};
+
+#define to_max20730_data(x) container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1 0xd2
+
+/*
+ * Convert a discrete value to the direct data format. Strictly speaking, all
+ * passed values are constants, so we could do that calculation manually. On
+ * the downside, that would make the driver more difficult to maintain, so
+ * let's use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3; /* take milli-units into account */
+ int b = info->b[class] * 1000;
+ long d;
+
+ d = v * info->m[class] + b;
+ /*
+ * R < 0 is true for all callers, so we don't need to worry
+ * about the R > 0 case.
+ */
+ while (R < 0) {
+ d = DIV_ROUND_CLOSEST(d, 10);
+ R++;
+ }
+ return (u16)d;
+}
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3;
+ int b = info->b[class] * 1000;
+ int m = info->m[class];
+ long d = (s16)w;
+
+ if (m == 0)
+ return 0;
+
+ while (R < 0) {
+ d *= 10;
+ R++;
+ }
+ d = (d - b) / m;
+ return d;
+}
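As a sanity check on the two helpers above, a standalone replay (not part of the patch) using the MAX20730 temperature coefficients declared further down (m = 21, b = 5887, R = -1, values in milli-degrees): 150 degrees C encodes to 904 and decodes back to roughly 150.1 degrees C.

#include <stdio.h>

static long div_round_closest(long a, long b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	const int m = 21, b = 1000 * 5887, R = -1 - 3;	/* milli-units */
	long d = 150000L * m + b;	/* encode 150000 m-degrees C */
	int r;

	for (r = R; r < 0; r++)		/* same loop as val_to_direct() */
		d = div_round_closest(d, 10);
	printf("encoded: %ld\n", d);	/* 904 */

	for (r = R; r < 0; r++)		/* same loop as direct_to_val() */
		d *= 10;
	printf("decoded: %ld\n", (d - b) / m);	/* 150142 m-degrees C */
	return 0;
}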
+
+static u32 max_current[][4] = {
+ [max20730] = { 13000, 16600, 20100, 23600 },
+ [max20734] = { 21000, 27000, 32000, 38000 },
+ [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct max20730_data *data = to_max20730_data(info);
+ int ret = 0;
+ u32 max_c;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ switch ((data->mfr_devset1 >> 11) & 0x3) {
+ case 0x0:
+ ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+ break;
+ case 0x1:
+ ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+ ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ struct pmbus_driver_info *info;
+ struct max20730_data *data;
+ u16 devset1;
+ int ret = 0;
+ int idx;
+
+ info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+ data = to_max20730_data(info);
+
+ mutex_lock(&data->lock);
+ devset1 = data->mfr_devset1;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ devset1 &= ~(BIT(11) | BIT(12));
+ if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+ devset1 |= BIT(11);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ devset1 &= ~(BIT(5) | BIT(6));
+
+ idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+ max_current[data->id], 4);
+ devset1 |= (idx << 5);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ if (!ret && devset1 != data->mfr_devset1) {
+ ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+ devset1);
+ if (!ret) {
+ data->mfr_devset1 = devset1;
+ pmbus_clear_cache(client);
+ }
+ }
+ mutex_unlock(&data->lock);
+ return ret;
+}
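The PMBUS_IOUT_OC_FAULT_LIMIT branch above snaps the requested limit to the nearest supported step with find_closest() and packs the index into DEVSET1 bits 6:5. A standalone replay with the MAX20730 table from above; the 22 A request is illustrative and the simple linear scan stands in for find_closest():

#include <stdio.h>
#include <stdlib.h>

static const long limits[] = { 13000, 16600, 20100, 23600 };	/* mA */

int main(void)
{
	long requested = 22000;		/* illustrative request, mA */
	int i, idx = 0;

	for (i = 1; i < 4; i++)		/* nearest-entry search */
		if (labs(requested - limits[i]) < labs(requested - limits[idx]))
			idx = i;

	printf("snapped to %ld mA, DEVSET1 |= %d << 5\n", limits[idx], idx);
	return 0;
}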
+
+static const struct pmbus_driver_info max20730_info[] = {
+ [max20730] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3609,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ /*
+ * Values in the datasheet are adjusted for temperature and
+ * for the relationship between Vin and Vout.
+ * Unfortunately, the data sheet suggests that Vout measurement
+ * may be scaled with a resistor array. This is indeed the case
+ * at least on the evaluation boards. As a result, any in-driver
+ * adjustments would either be wrong or require elaborate means
+ * to configure the scaling. Instead of doing that, just report
+ * raw values and let userspace handle adjustments.
+ */
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 153,
+ .b[PSC_CURRENT_OUT] = 4976,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20734] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6209 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3592,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 111,
+ .b[PSC_CURRENT_OUT] = 3461,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20743] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3597,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 95,
+ .b[PSC_CURRENT_OUT] = 5014,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+};
+
+static int max20730_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max20730_data *data;
+ enum chips chip_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ /*
+ * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+ * and MAX20734, reading it returns M20743. Presumably that is
+ * the reason why the command is not documented. Unfortunately,
+ * that means that there is no reliable means to detect the chip.
+ * However, we can at least detect the chip series. Compare
+ * the returned value against 'M20743' and bail out if there is
+ * a mismatch. If that doesn't work for all chips, we may have
+ * to remove this check.
+ */
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ if (ret != 6 || strncmp(buf, "M20743", 6)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Revision\n");
+ return ret;
+ }
+ if (ret != 1 || buf[0] != 'F') {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ if (client->dev.of_node)
+ chip_id = (enum chips)of_device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->id = chip_id;
+ mutex_init(&data->lock);
+ memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+ if (ret < 0)
+ return ret;
+ data->mfr_devset1 = ret;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+ { "max20730", max20730 },
+ { "max20734", max20734 },
+ { "max20743", max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+ { .compatible = "maxim,max20730", .data = (void *)max20730 },
+ { .compatible = "maxim,max20734", .data = (void *)max20734 },
+ { .compatible = "maxim,max20743", .data = (void *)max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+ .driver = {
+ .name = "max20730",
+ .of_match_table = max20730_of_match,
+ },
+ .probe = max20730_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index ee5f0cdbde06..da3c38cb9a5c 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
- .vrm_version = vr12,
+ .vrm_version[0] = vr12,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_POWER] = linear,
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d01018..51e8312b6c2d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client,
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- int vout_mode;
+ int vout_mode, i;
vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
@@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client,
break;
case 1:
info->format[PSC_VOLTAGE_OUT] = vid;
- info->vrm_version = vr11;
+ for (i = 0; i < info->pages; i++)
+ info->vrm_version[i] = vr11;
break;
case 2:
info->format[PSC_VOLTAGE_OUT] = direct;
@@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = {
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"max20796", (kernel_ulong_t)&pmbus_info_one},
{"mdt040", (kernel_ulong_t)&pmbus_info_one},
{"ncp4200", (kernel_ulong_t)&pmbus_info_one},
{"ncp4208", (kernel_ulong_t)&pmbus_info_one},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d198af3a92b6..13b34bd67f23 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -22,6 +22,8 @@ enum pmbus_regs {
PMBUS_CLEAR_FAULTS = 0x03,
PMBUS_PHASE = 0x04,
+ PMBUS_WRITE_PROTECT = 0x10,
+
PMBUS_CAPABILITY = 0x19,
PMBUS_QUERY = 0x1A,
@@ -226,6 +228,15 @@ enum pmbus_regs {
#define PB_OPERATION_CONTROL_ON BIT(7)
/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */
+#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
+
+/*
* CAPABILITY
*/
#define PB_CAPABILITY_SMBALERT BIT(4)
@@ -377,12 +388,12 @@ enum pmbus_sensor_classes {
#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
int pages; /* Total number of pages */
enum pmbus_data_format format[PSC_NUM_CLASSES];
- enum vrm_version vrm_version;
+ enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8470097907bc..d9c17feb7b4a 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
long val = sensor->data;
long rv = 0;
- switch (data->info->vrm_version) {
+ switch (data->info->vrm_version[sensor->page]) {
case vr11:
if (val >= 0x02 && val <= 0xb2)
rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
@@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
if (val >= 0x01)
rv = 500 + (val - 1) * 10;
break;
+ case imvp9:
+ if (val >= 0x01)
+ rv = 200 + (val - 1) * 10;
+ break;
+ case amd625mv:
+ if (val >= 0x0 && val <= 0xd8)
+ rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+ break;
}
return rv;
}
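Worked numbers for the two new encodings, with an illustrative raw VID code of 0x21 (decimal 33): imvp9 yields 200 + 32 * 10 = 520 mV, and amd625mv yields DIV_ROUND_CLOSEST(155000 - 33 * 625, 100) = 1344 mV. A standalone replay:

#include <stdio.h>

int main(void)
{
	int val = 0x21;			/* illustrative raw VID code */

	/* imvp9: 10 mV per step above a 200 mV floor */
	printf("imvp9:    %d mV\n", 200 + (val - 1) * 10);

	/* amd625mv: 6.25 mV per step down from 1.55 V, rounded */
	printf("amd625mv: %d mV\n", (155000 - val * 625 + 50) / 100);
	return 0;
}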
@@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
snprintf(sensor->name, sizeof(sensor->name), "%s%d",
name, seq);
+ if (data->flags & PMBUS_WRITE_PROTECTED)
+ readonly = true;
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
@@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
+ /*
+ * Check whether the chip is write protected. If it is, we cannot
+ * clear faults and should not try to. In that case, writes into
+ * limit registers also need to be disabled.
+ */
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
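The three PB_WP_* patterns this check reacts to are defined in the pmbus.h hunk above; any of bits 7..5 set means limits must be treated as read-only. A short standalone decode:

#include <stdio.h>

#define PB_WP_ALL	(1 << 7)	/* all writes but WRITE_PROTECT blocked */
#define PB_WP_OP	(1 << 6)	/* all but WP, OPERATION, PAGE */
#define PB_WP_VOUT	(1 << 5)	/* all but WP, OPERATION, PAGE, VOUT, ON_OFF */

int main(void)
{
	unsigned int regs[] = { 0x00, 0x20, 0x40, 0x80 };
	int i;

	for (i = 0; i < 4; i++)
		printf("WRITE_PROTECT=0x%02x -> %swrite protected\n", regs[i],
		       (regs[i] & (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)) ?
		       "" : "not ");
	return 0;
}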
if (data->info->pages)
pmbus_clear_faults(client);
else
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index ebe3f023f840..517584cff3de 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -19,26 +19,30 @@
static int pxe1610_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- u8 vout_mode;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_mode = ret & GENMASK(4, 0);
-
- switch (vout_mode) {
- case 1:
- info->vrm_version = vr12;
- break;
- case 2:
- info->vrm_version = vr13;
- break;
- default:
- return -ENODEV;
+ int i;
+
+ for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+ if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+ u8 vout_mode;
+ int ret;
+
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_mode = ret & GENMASK(4, 0);
+
+ switch (vout_mode) {
+ case 1:
+ info->vrm_version[i] = vr12;
+ break;
+ case 2:
+ info->vrm_version[i] = vr13;
+ break;
+ default:
+ return -ENODEV;
+ }
}
}
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 86bb3aca09ed..9c22e9013dd7 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
u8 vout_params;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_params = ret & GENMASK(4, 0);
-
- switch (vout_params) {
- case TPS53679_PROT_VR13_10MV:
- case TPS53679_PROT_VR12_5_10MV:
- info->vrm_version = vr13;
- break;
- case TPS53679_PROT_VR13_5MV:
- case TPS53679_PROT_VR12_5MV:
- case TPS53679_PROT_IMVP8_5MV:
- info->vrm_version = vr12;
- break;
- default:
- return -EINVAL;
+ int i, ret;
+
+ for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case TPS53679_PROT_VR13_10MV:
+ case TPS53679_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case TPS53679_PROT_VR13_5MV:
+ case TPS53679_PROT_VR12_5MV:
+ case TPS53679_PROT_IMVP8_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ default:
+ return -EINVAL;
+ }
}
return 0;
@@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client,
static const struct i2c_device_id tps53679_id[] = {
{"tps53679", 0},
+ {"tps53688", 0},
{}
};
@@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id);
static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
+ {.compatible = "ti,tps53688"},
{}
};
MODULE_DEVICE_TABLE(of, tps53679_of_match);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index a9229c6b0e84..23ea3415f166 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -18,7 +18,8 @@
#include <linux/gpio/driver.h>
#include "pmbus.h"
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+ ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
@@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_OUTPUT 1
#define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x) ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x) ((x) & 0x1f)
#define UCD9000_MON_VOLTAGE 1
#define UCD9000_MON_TEMPERATURE 2
@@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_NAME_LEN 16
#define UCD9090_NUM_GPIOS 23
#define UCD901XX_NUM_GPIOS 26
+#define UCD90320_NUM_GPIOS 84
#define UCD90910_NUM_GPIOS 26
#define UCD9000_DEBUGFS_NAME_LEN 24
#define UCD9000_GPI_COUNT 8
+#define UCD90320_GPI_COUNT 32
struct ucd9000_data {
u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
@@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = {
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
{"ucd90160", ucd90160},
+ {"ucd90320", ucd90320},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
@@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
.data = (void *)ucd90160
},
{
+ .compatible = "ti,ucd90320",
+ .data = (void *)ucd90320
+ },
+ {
.compatible = "ti,ucd9090",
.data = (void *)ucd9090
},
@@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
case ucd90160:
data->gpio.ngpio = UCD901XX_NUM_GPIOS;
break;
+ case ucd90320:
+ data->gpio.ngpio = UCD90320_NUM_GPIOS;
+ break;
case ucd90910:
data->gpio.ngpio = UCD90910_NUM_GPIOS;
break;
@@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val)
struct ucd9000_debugfs_entry *entry = data;
struct i2c_client *client = entry->client;
u8 buffer[I2C_SMBUS_BLOCK_MAX];
- int ret;
+ int ret, i;
ret = ucd9000_get_mfr_status(client, buffer);
if (ret < 0)
return ret;
/*
- * Attribute only created for devices with gpi fault bits at bits
- * 16-23, which is the second byte of the response.
+ * GPI fault bits are stored in sets of 8, starting two bytes from the
+ * end of the response.
*/
- *val = !!(buffer[1] & BIT(entry->index));
+ i = ret - 3 - entry->index / 8;
+ if (i >= 0)
+ *val = !!(buffer[i] & BIT(entry->index % 8));
return 0;
}
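A worked replay of the new index math, with an assumed 12-byte MFR_STATUS response: GPI 1 maps to buffer[9] bit 0, GPI 11 to buffer[8] bit 2, GPI 31 to buffer[6] bit 6. On real hardware the length comes from ucd9000_get_mfr_status().

#include <stdio.h>

int main(void)
{
	int ret = 12;			/* assumed MFR_STATUS length, bytes */
	int index;

	for (index = 0; index < 32; index += 10) {
		int i = ret - 3 - index / 8;	/* byte holding this GPI */

		if (i >= 0)
			printf("gpi%d -> buffer[%d] bit %d\n",
			       index + 1, i, index % 8);
	}
	return 0;
}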
@@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
{
struct dentry *debugfs;
struct ucd9000_debugfs_entry *entries;
- int i;
+ int i, gpi_count;
char name[UCD9000_DEBUGFS_NAME_LEN];
debugfs = pmbus_get_debugfs_dir(client);
@@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
/*
* Of the chips this driver supports, only the UCD9090, UCD90160,
- * and UCD90910 report GPI faults in their MFR_STATUS register, so only
- * create the GPI fault debugfs attributes for those chips.
+ * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+ * register, so only create the GPI fault debugfs attributes for those
+ * chips.
*/
if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
- mid->driver_data == ucd90910) {
+ mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+ gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+ : UCD9000_GPI_COUNT;
entries = devm_kcalloc(&client->dev,
- UCD9000_GPI_COUNT, sizeof(*entries),
+ gpi_count, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+ for (i = 0; i < gpi_count; i++) {
entries[i].client = client;
entries[i].index = i;
scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644
index 000000000000..3d47806ff4d3
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV 0x10 /* AMD mode, 6.25-mV DAC */
+#define XDPE122_PAGE_NUM 2
+
+static int xdpe122_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ u8 vout_params;
+ int i, ret;
+
+ for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case XDPE122_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case XDPE122_PROT_VR12_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ case XDPE122_PROT_IMVP9_10MV:
+ info->vrm_version[i] = imvp9;
+ break;
+ case XDPE122_AMD_625MV:
+ info->vrm_version[i] = amd625mv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+ .pages = XDPE122_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+
+ info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe12254", 0},
+ {"xdpe12284", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+ {.compatible = "infineon, xdpe12254"},
+ {.compatible = "infineon, xdpe12284"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+ .driver = {
+ .name = "xdpe12284",
+ .of_match_table = of_match_ptr(xdpe122_of_match),
+ },
+ .probe = xdpe122_probe,
+ .remove = pmbus_do_remove,
+ .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
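A standalone replay of the per-page VOUT_MODE decoding performed by xdpe122_identify(); the two raw readings are illustrative, while the case values come from the XDPE122_PROT_* and XDPE122_AMD_625MV defines above:

#include <stdio.h>

int main(void)
{
	unsigned int raw_per_page[2] = { 0x03, 0x10 };	/* assumed readings */
	int page;

	for (page = 0; page < 2; page++) {
		unsigned int vout_params = raw_per_page[page] & 0x1f;
		const char *vrm;

		switch (vout_params) {
		case 0x02: vrm = "vr13"; break;		/* VR12.5, 10 mV */
		case 0x01: vrm = "vr12"; break;		/* VR12.0, 5 mV */
		case 0x03: vrm = "imvp9"; break;	/* IMVP9, 10 mV */
		case 0x10: vrm = "amd625mv"; break;	/* AMD, 6.25 mV */
		default:   vrm = "unsupported"; break;
		}
		printf("page %d: VOUT_MODE=0x%02x -> %s\n",
		       page, raw_per_page[page], vrm);
	}
	return 0;
}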
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 42ffd2e5182d..30b7b3ea8836 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
struct pwm_args args;
@@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev)
return 0;
}
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+ pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+ return pwm_fan_disable(dev);
+}
+
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
@@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
+ .shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index eb171d15ac48..7ffadc2da57b 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -28,8 +28,6 @@
* w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3
* w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
* w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
- * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
- * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,7 +48,7 @@
enum kinds {
w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
- w83667hg, w83667hg_b, nct6775, nct6776,
+ w83667hg, w83667hg_b,
};
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
@@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = {
"w83627uhg",
"w83667hg",
"w83667hg",
- "nct6775",
- "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
#define DRVNAME "w83627ehf"
/*
@@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_W83627UHG_ID 0xa230
#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
-#define SIO_NCT6775_ID 0xb470
-#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1 0x506
-#define NCT6775_REG_FANDIV2 0x507
-#define NCT6775_REG_FAN_DEBOUNCE 0xf0
-
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
- = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
- = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
- = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
- = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
- = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
static const char *const w83667hg_b_temp_label[] = {
"SYSTIN",
"CPUTIN",
@@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = {
"PECI Agent 4"
};
-static const char *const nct6775_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "AMD SB-TSI",
- "PECI Agent 0",
- "PECI Agent 1",
- "PECI Agent 2",
- "PECI Agent 3",
- "PECI Agent 4",
- "PECI Agent 5",
- "PECI Agent 6",
- "PECI Agent 7",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "SMBUSMASTER 0",
- "SMBUSMASTER 1",
- "SMBUSMASTER 2",
- "SMBUSMASTER 3",
- "SMBUSMASTER 4",
- "SMBUSMASTER 5",
- "SMBUSMASTER 6",
- "SMBUSMASTER 7",
- "PECI Agent 0",
- "PECI Agent 1",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
- "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP)
static int is_word_sized(u16 reg)
{
@@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
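fan_from_reg8(), which now serves all remaining chips, converts an 8-bit count of 1.35 MHz clock pulses into RPM, scaled by the divider. A worked example with illustrative register values:

#include <stdio.h>

static unsigned int fan_from_reg8(unsigned int reg, unsigned int divreg)
{
	if (reg == 0 || reg == 255)	/* 0xff means "no reading" */
		return 0;
	return 1350000U / (reg << divreg);
}

int main(void)
{
	/* 105 pulses at divider 4 (divreg 2): 1350000 / 420 = 3214 RPM */
	printf("%u RPM\n", fan_from_reg8(105, 2));
	return 0;
}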
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
- if ((reg & 0xff1f) == 0xff1f)
- return 0;
-
- reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
- if (reg == 0)
- return 0;
-
- return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
- if (reg == 0 || reg == 0xffff)
- return 0;
-
- /*
- * Even though the registers are 16 bit wide, the fan divisor
- * still applies.
- */
- return 1350000U / (reg << divreg);
-}
-
static inline unsigned int
div_from_reg(u8 reg)
{
@@ -418,7 +306,6 @@ struct w83627ehf_data {
int addr; /* IO base of hw monitor block */
const char *name;
- struct device *hwmon_dev;
struct mutex lock;
u16 reg_temp[NUM_REG_TEMP];
@@ -428,20 +315,10 @@ struct w83627ehf_data {
u8 temp_src[NUM_REG_TEMP];
const char * const *temp_label;
- const u16 *REG_PWM;
- const u16 *REG_TARGET;
- const u16 *REG_FAN;
- const u16 *REG_FAN_MIN;
- const u16 *REG_FAN_START_OUTPUT;
- const u16 *REG_FAN_STOP_OUTPUT;
- const u16 *REG_FAN_STOP_TIME;
const u16 *REG_FAN_MAX_OUTPUT;
const u16 *REG_FAN_STEP_OUTPUT;
const u16 *scale_in;
- unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
- unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -457,7 +334,6 @@ struct w83627ehf_data {
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
u8 has_fan_min; /* some fans don't have min register */
- bool has_fan_div;
u8 temp_type[3];
s8 temp_offset[3];
s16 temp[9];
@@ -494,6 +370,7 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+ u8 have_vid:1;
#ifdef CONFIG_PM
/* Remember extra register values over suspend/resume */
@@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
}
/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
- u8 reg;
-
- switch (nr) {
- case 0:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
- | (data->fan_div[0] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 1:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
- | ((data->fan_div[1] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 2:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
- | (data->fan_div[2] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- case 3:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
- | ((data->fan_div[3] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- }
-}
-
-/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
-static void w83627ehf_write_fan_div_common(struct device *dev,
- struct w83627ehf_data *data, int nr)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_write_fan_div(data, nr);
- else
- w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
- u8 i;
-
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fan_div[0] = i & 0x7;
- data->fan_div[1] = (i & 0x70) >> 4;
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- data->fan_div[2] = i & 0x7;
- if (data->has_fan & (1<<3))
- data->fan_div[3] = (i & 0x70) >> 4;
-}
-
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
-static void w83627ehf_update_fan_div_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_update_fan_div(data);
- else
- w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
- int i;
- int pwmcfg, fanmodecfg;
-
- for (i = 0; i < data->pwm_num; i++) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- fanmodecfg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[i]);
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
- data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
- data->tolerance[i] = fanmodecfg & 0x0f;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
- }
-}
-
static void w83627ehf_update_pwm(struct w83627ehf_data *data)
{
int i;
@@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
& 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
}
}
-static void w83627ehf_update_pwm_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
- nct6775_update_pwm(data);
- else
- w83627ehf_update_pwm(data);
-}
-
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
int i;
mutex_lock(&data->update_lock);
@@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (!(data->has_fan & (1 << i)))
continue;
- reg = w83627ehf_read_value(data, data->REG_FAN[i]);
- data->rpm[i] = data->fan_from_reg(reg,
- data->fan_div[i]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+ data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
if (data->has_fan_min & (1 << i))
data->fan_min[i] = w83627ehf_read_value(data,
- data->REG_FAN_MIN[i]);
+ W83627EHF_REG_FAN_MIN[i]);
/*
* If we failed to measure the fan speed and clock
* divider can be increased, let's try that for next
* time
*/
- if (data->has_fan_div
- && (reg >= 0xff || (sio_data->kind == nct6775
- && reg == 0x00))
- && data->fan_div[i] < 0x07) {
+ if (reg >= 0xff && data->fan_div[i] < 0x07) {
dev_dbg(dev,
"Increasing fan%d clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div_common(dev, data, i);
+ w83627ehf_write_fan_div(data, i);
/* Preserve min limit if possible */
if ((data->has_fan_min & (1 << i))
&& data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- data->REG_FAN_MIN[i],
+ W83627EHF_REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
@@ -857,13 +631,13 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->fan_start_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_START_OUTPUT[i]);
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
data->fan_stop_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_OUTPUT[i]);
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_TIME[i]);
+ W83627EHF_REG_FAN_STOP_TIME[i]);
if (data->REG_FAN_MAX_OUTPUT &&
data->REG_FAN_MAX_OUTPUT[i] != 0xff)
@@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->target_temp[i] =
w83627ehf_read_value(data,
- data->REG_TARGET[i]) &
+ W83627EHF_REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
}
@@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
return data;
}
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
- data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
#define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- unsigned long val; \
- int err; \
- err = kstrtoul(buf, 10, &val); \
- if (err < 0) \
- return err; \
+ if (val < 0) \
+ return -EINVAL; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
- w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
- data->in_##reg[nr]); \
+ data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+ data->in_##reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
- SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
- SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
- SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
- SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
- SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
- SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
- SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
- SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
- SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
- SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
- SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
- SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
- SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
- SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
- SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
- SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
- SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
- SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
- SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
- SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n",
- data->fan_from_reg_min(data->fan_min[nr],
- data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
unsigned int reg;
u8 new_div;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ if (val < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
- if (!data->has_fan_div) {
- /*
- * Only NCT6776F for now, so we know that this is a 13 bit
- * register
- */
- if (!val) {
- val = 0xff1f;
- } else {
- if (val > 1350000U)
- val = 135000U;
- val = 1350000U / val;
- val = (val & 0x1f) | ((val << 3) & 0xff00);
- }
- data->fan_min[nr] = val;
- goto done; /* Leave fan divider alone */
- }
if (!val) {
/* No min limit, alarm disabled */
- data->fan_min[nr] = 255;
- new_div = data->fan_div[nr]; /* No change */
- dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+ data->fan_min[channel] = 255;
+ new_div = data->fan_div[channel]; /* No change */
+ dev_info(dev, "fan%u low limit and alarm disabled\n",
+ channel + 1);
} else if ((reg = 1350000U / val) >= 128 * 255) {
/*
* Speed below this value cannot possibly be represented,
* even with the highest divider (128)
*/
- data->fan_min[nr] = 254;
+ data->fan_min[channel] = 254;
new_div = 7; /* 128 == (1 << 7) */
dev_warn(dev,
"fan%u low limit %lu below minimum %u, set to minimum\n",
- nr + 1, val, data->fan_from_reg_min(254, 7));
+ channel + 1, val, fan_from_reg8(254, 7));
} else if (!reg) {
/*
* Speed above this value cannot possibly be represented,
* even with the lowest divider (1)
*/
- data->fan_min[nr] = 1;
+ data->fan_min[channel] = 1;
new_div = 0; /* 1 == (1 << 0) */
dev_warn(dev,
"fan%u low limit %lu above maximum %u, set to maximum\n",
- nr + 1, val, data->fan_from_reg_min(1, 0));
+ channel + 1, val, fan_from_reg8(1, 0));
} else {
/*
* Automatically pick the best divider, i.e. the one such
@@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
reg >>= 1;
new_div++;
}
- data->fan_min[nr] = reg;
+ data->fan_min[channel] = reg;
}
/*
* Write both the fan clock divider (if it changed) and the new
* fan min (unconditionally)
*/
- if (new_div != data->fan_div[nr]) {
+ if (new_div != data->fan_div[channel]) {
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
- nr + 1, div_from_reg(data->fan_div[nr]),
+ channel + 1, div_from_reg(data->fan_div[channel]),
div_from_reg(new_div));
- data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div_common(dev, data, nr);
+ data->fan_div[channel] = new_div;
+ w83627ehf_write_fan_div(data, channel);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
-done:
- w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
- data->fan_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
- SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
- SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
-
-static struct sensor_device_attribute sda_fan_alarm[] = {
- SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
- SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
- SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
- SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
- SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 2),
- SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 3),
- SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
- SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
- SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+ data->fan_min[channel]);
+ mutex_unlock(&data->update_lock);
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+ return 0;
}
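A worked replay of the divider auto-selection above; the exact loop bound falls outside the hunk shown, so this sketch assumes reg is halved until it lands in the 96..192 window named in the comment, capped at divider 128:

#include <stdio.h>

int main(void)
{
	unsigned long val = 600;		/* requested min, RPM */
	unsigned int reg = 1350000U / val;	/* 2250 */
	unsigned int new_div = 0;

	while (reg > 192 && new_div < 7) {	/* assumed loop bound */
		reg >>= 1;
		new_div++;
	}
	/* reg = 140, divider = 16; effective limit is about 602 RPM */
	printf("fan_min=%u div=%u -> %u RPM\n",
	       reg, 1U << new_div, 1350000U / (reg << new_div));
	return 0;
}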
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
#define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- int err; \
- long val; \
- err = kstrtol(buf, 10, &val); \
- if (err < 0) \
- return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+ data->reg[channel] = LM75_TEMP_TO_REG(val); \
+ w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_temp_reg(reg_temp_over, temp_max);
store_temp_reg(reg_temp_hyst, temp_max_hyst);
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-
- return sprintf(buf, "%d\n",
- data->temp_offset[sensor_attr->index] * 1000);
-}
-
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
mutex_lock(&data->update_lock);
- data->temp_offset[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+ data->temp_offset[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
- SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
- SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
- SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
- SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
- SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
- SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
- SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
- SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
- SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
- SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
- SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
- SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
- SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
- SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
- SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 1),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 2),
- SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 3),
- SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 4),
- SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 5),
- SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 6),
- SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 7),
- SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 1),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 2),
- SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 3),
- SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 4),
- SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 5),
- SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 6),
- SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 7),
- SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 8),
-};
-
-static struct sensor_device_attribute sda_temp_alarm[] = {
- SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
- SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
- SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
-
-static struct sensor_device_attribute sda_temp_type[] = {
- SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
- SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
- SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
-
-static struct sensor_device_attribute sda_temp_offset[] = {
- SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 0),
- SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 1),
- SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 2),
-};
-
-#define show_pwm_reg(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", data->reg[nr]); \
+ return 0;
}
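
The conversion applied throughout this hunk follows one pattern: sysfs ->store() callbacks that parsed the user string themselves and returned a byte count become plain helpers that take an already-parsed long from the hwmon core and return 0 or a negative errno. A minimal sketch of the new shape — demo_data and demo_store_limit are illustrative names, not driver code:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_data {
	struct mutex update_lock;
	u8 limit;
};

/* New-style helper: the hwmon core has already done kstrtol() for us. */
static int demo_store_limit(struct demo_data *data, int channel, long val)
{
	if (val < 0 || val > 255)
		return -EINVAL;		/* errno, not a byte count */
	mutex_lock(&data->update_lock);
	data->limit = val;		/* hardware write would go here */
	mutex_unlock(&data->update_lock);
	return 0;
}
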
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
-
-static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (val > 1)
- return -EINVAL;
-
- /* On NCT67766F, DC mode is only supported for pwm1 */
- if (sio_data->kind == nct6776 && nr && val != 1)
+ if (val < 0 || val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- data->pwm_mode[nr] = val;
- reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+ data->pwm_mode[channel] = val;
+ reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
if (!val)
- reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
- w83627ehf_write_value(data, data->REG_PWM[nr], val);
+ data->pwm[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
- return -EINVAL;
- /* SmartFan III mode is not supported on NCT6776F */
- if (sio_data->kind == nct6776 && val == 4)
+ if (!val || val < 0 ||
+ (val > 4 && val != data->pwm_enable_orig[channel]))
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- reg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[nr]);
- reg &= 0x0f;
- reg |= (val - 1) << 4;
- w83627ehf_write_value(data,
- NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
- }
+ data->pwm_enable[channel] = val;
+ reg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[channel]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+ reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-
#define show_tol_temp(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+ w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
@@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /* Limit tolerance further for NCT6776F */
- if (sio_data->kind == nct6776 && val > 7)
- val = 7;
- reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
- }
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
-static struct sensor_device_attribute sda_pwm[] = {
- SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
- SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
- SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
- SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
- SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 0),
- SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 1),
- SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 2),
- SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
- SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 0),
- SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 1),
- SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 2),
- SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
- SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 0),
- SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 1),
- SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 2),
- SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
- SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 0),
- SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 1),
- SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 2),
- SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+ store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+ store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+ store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+ store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+ store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+ store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+ store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+ store_tolerance, 3);
/* Smart Fan registers */
@@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = {
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
} \
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
- SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 3),
- SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 3),
- SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 3),
- SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
- SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 2),
- SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 2),
- SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
- SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 0),
- SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 1),
- SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 0),
- SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 1),
- SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 0),
- SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 1);
/*
* pwm1 and pwm3 don't support max and step settings on all chips.
* Need to check support while generating/removing attribute files.
*/
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
- SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 0),
- SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 0),
- SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 1),
- SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 1),
- SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 2),
- SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR_RO(cpu0_vid);
+DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
- return sprintf(buf, "%d\n",
- !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- u16 reg, mask;
+ const u16 mask = 0x80;
+ u16 reg;
- if (kstrtoul(buf, 10, &val) || val != 0)
+ if (val != 0 || channel != 0)
return -EINVAL;
- mask = to_sensor_dev_attr_2(attr)->nr;
-
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
@@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static struct sensor_device_attribute_2 sda_caseopen[] = {
- SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x80, 0x10),
- SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- /*
- * some entries in the following arrays may not have been used in
- * device_create_file(), but device_remove_file() will ignore them
- */
- int i;
+ struct device *dev = container_of(kobj, struct device, kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct device_attribute *devattr;
+ struct sensor_device_attribute *sda;
+
+ devattr = container_of(a, struct device_attribute, attr);
+
+ /* Not a sensor attribute */
+ if (devattr->show == cpu0_vid_show && data->have_vid)
+ return a->mode;
+
+ sda = (struct sensor_device_attribute *)devattr;
+
+ if (sda->index < 2 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index < 3 &&
+ (devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output) &&
+ data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+ return a->mode;
+
+ /* If fan3 and fan4 are enabled, create the files for them */
+ if (sda->index == 2 &&
+ (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index == 3 &&
+ (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output ||
+ devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output))
+ return a->mode;
+
+ if ((devattr->show == show_target_temp ||
+ devattr->show == show_tolerance) &&
+ (data->has_fan & (1 << sda->index)) &&
+ sda->index < data->pwm_num)
+ return a->mode;
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
- device_remove_file(dev, &attr->dev_attr);
- }
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- device_remove_file(dev, &sda_in_input[i].dev_attr);
- device_remove_file(dev, &sda_in_alarm[i].dev_attr);
- device_remove_file(dev, &sda_in_min[i].dev_attr);
- device_remove_file(dev, &sda_in_max[i].dev_attr);
- }
- for (i = 0; i < 5; i++) {
- device_remove_file(dev, &sda_fan_input[i].dev_attr);
- device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
- device_remove_file(dev, &sda_fan_div[i].dev_attr);
- device_remove_file(dev, &sda_fan_min[i].dev_attr);
- }
- for (i = 0; i < data->pwm_num; i++) {
- device_remove_file(dev, &sda_pwm[i].dev_attr);
- device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
- device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
- device_remove_file(dev, &sda_target_temp[i].dev_attr);
- device_remove_file(dev, &sda_tolerance[i].dev_attr);
- }
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_input[i].dev_attr);
- device_remove_file(dev, &sda_temp_label[i].dev_attr);
- if (i == 2 && data->temp3_val_only)
- continue;
- device_remove_file(dev, &sda_temp_max[i].dev_attr);
- device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
- if (i > 2)
- continue;
- device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
- device_remove_file(dev, &sda_temp_type[i].dev_attr);
- device_remove_file(dev, &sda_temp_offset[i].dev_attr);
- }
+ return 0;
+}
+
+/* These groups handle non-standard attributes used in this device */
+static struct attribute *w83627ehf_attrs[] = {
+
+ &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_target.dev_attr.attr,
+ &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_target.dev_attr.attr,
+ &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_target.dev_attr.attr,
+ &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_target.dev_attr.attr,
+ &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+ &dev_attr_cpu0_vid.attr,
+ NULL
+};
- device_remove_file(dev, &sda_caseopen[0].dev_attr);
- device_remove_file(dev, &sda_caseopen[1].dev_attr);
+static const struct attribute_group w83627ehf_group = {
+ .attrs = w83627ehf_attrs,
+ .is_visible = w83627ehf_attrs_visible,
+};
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group *w83627ehf_groups[] = {
+ &w83627ehf_group,
+ NULL
+};
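
The Smart Fan pwmX_* files above have no counterpart in the generic hwmon ABI, so they stay behind the classic sysfs-group mechanism, with the kobject-level .is_visible callback doing the per-chip pruning that conditional device_create_file() calls used to do. A hedged sketch of that pattern, with illustrative demo_* names:

#include <linux/sysfs.h>

static struct attribute *demo_attrs[] = {
	/* &sensor_dev_attr_foo.dev_attr.attr entries would go here */
	NULL				/* array must be NULL-terminated */
};

static umode_t demo_visible(struct kobject *kobj, struct attribute *a, int n)
{
	/* Return 0 to hide this attribute, or its mode bits to expose it. */
	return a->mode;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_visible,
};
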
+
+/*
+ * Driver and device management
+ */
/* Get the monitoring functions started */
static inline void w83627ehf_init_device(struct w83627ehf_data *data,
@@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
}
}
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
- int r1, int r2)
-{
- swap(data->temp_src[r1], data->temp_src[r2]);
- swap(data->reg_temp[r1], data->reg_temp[r2]);
- swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
- swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
- swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
static void
w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
{
@@ -1954,7 +1293,7 @@ static void
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
- int fan3pin, fan4pin, fan4min, fan5pin, regval;
+ int fan3pin, fan4pin, fan5pin, regval;
/* The W83627UHG is simple, only two fan inputs, no config */
if (sio_data->kind == w83627uhg) {
@@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == nct6775) {
- /* On NCT6775, fan4 shares pins with the fdc interface */
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- } else if (sio_data->kind == nct6776) {
- bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
- if (regval & 0x80)
- fan3pin = gpok;
- else
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
- if (regval & 0x40)
- fan4pin = gpok;
- else
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
- if (regval & 0x20)
- fan5pin = gpok;
- else
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
- fan4min = fan4pin;
- } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
- fan4min = fan4pin;
} else {
fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
- fan4min = fan4pin;
}
data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
data->has_fan |= (fan3pin << 2);
data->has_fan_min |= (fan3pin << 2);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /*
- * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
- * register
- */
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
- } else {
- /*
- * It looks like fan4 and fan5 pins can be alternatively used
- * as fan on/off switches, but fan5 control is write only :/
- * We assume that if the serial interface is disabled, designers
- * connected fan5 as input unless they are emitting log 1, which
- * is not the default.
- */
- regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((regval & (1 << 2)) && fan4pin) {
- data->has_fan |= (1 << 3);
- data->has_fan_min |= (1 << 3);
+ /*
+ * It looks like fan4 and fan5 pins can be alternatively used
+ * as fan on/off switches, but fan5 control is write only :/
+ * We assume that if the serial interface is disabled, designers
+ * connected fan5 as input unless they are emitting logic 1, which
+ * is not the default.
+ */
+ regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((regval & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(regval & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+}
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct w83627ehf_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ /* channel 0.., name 1.. */
+ if (!(data->have_temp & (1 << channel)))
+ return 0;
+ if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ return 0444;
+ if (channel == 2 && data->temp3_val_only)
+ return 0;
+ if (attr == hwmon_temp_max) {
+ if (data->reg_temp_over[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (attr == hwmon_temp_max_hyst) {
+ if (data->reg_temp_hyst[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (channel > 2)
+ return 0;
+ if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+ return 0444;
+ if (attr == hwmon_temp_offset) {
+ if (data->have_temp_offset & (1 << channel))
+ return 0644;
+ else
+ return 0;
+ }
+ break;
+
+ case hwmon_fan:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)))
+ return 0;
+ if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+ return 0444;
+ if (attr == hwmon_fan_div) {
+ return 0444;
}
- if (!(regval & (1 << 1)) && fan5pin) {
- data->has_fan |= (1 << 4);
- data->has_fan_min |= (1 << 4);
+ if (attr == hwmon_fan_min) {
+ if (data->has_fan_min & (1 << channel))
+ return 0644;
+ else
+ return 0;
}
+ break;
+
+ case hwmon_in:
+ /* channel 0.., name 0.. */
+ if (channel >= data->in_num)
+ return 0;
+ if (channel == 6 && data->in6_skip)
+ return 0;
+ if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+ return 0444;
+ if (attr == hwmon_in_min || attr == hwmon_in_max)
+ return 0644;
+ break;
+
+ case hwmon_pwm:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)) ||
+ channel >= data->pwm_num)
+ return 0;
+ if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+ attr == hwmon_pwm_input)
+ return 0644;
+ break;
+
+ case hwmon_intrusion:
+ return 0644;
+
+ default: /* Shouldn't happen */
+ return 0;
}
+
+ return 0; /* Shouldn't happen */
}
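
For the standard attributes, visibility moves into the hwmon_ops contract instead: the core queries this callback once per (type, attr, channel) triple, and the returned umode_t decides everything — 0 suppresses the sysfs node, 0444 creates it read-only, 0644 read-write. A trivial sketch of that contract (demo_is_visible is illustrative and assumes <linux/hwmon.h>):

#include <linux/hwmon.h>

static umode_t demo_is_visible(const void *drvdata,
			       enum hwmon_sensor_types type, u32 attr,
			       int channel)
{
	if (type == hwmon_in && attr == hwmon_in_input)
		return 0444;	/* expose inX_input read-only */
	return 0;		/* hide everything else */
}
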
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+ return 0;
+ case hwmon_temp_max:
+ *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+ return 0;
+ case hwmon_temp_max_hyst:
+ *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+ return 0;
+ case hwmon_temp_offset:
+ *val = data->temp_offset[channel] * 1000;
+ return 0;
+ case hwmon_temp_type:
+ *val = (int)data->temp_type[channel];
+ return 0;
+ case hwmon_temp_alarm:
+ if (channel < 3) {
+ int bit[] = { 4, 5, 13 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ *val = in_from_reg(data->in[channel], channel, data->scale_in);
+ return 0;
+ case hwmon_in_min:
+ *val = in_from_reg(data->in_min[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_max:
+ *val = in_from_reg(data->in_max[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_alarm:
+ if (channel < 10) {
+ int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = data->rpm[channel];
+ return 0;
+ case hwmon_fan_min:
+ *val = fan_from_reg8(data->fan_min[channel],
+ data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_div:
+ *val = div_from_reg(data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_alarm:
+ if (channel < 5) {
+ int bit[] = { 6, 7, 11, 10, 23 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = data->pwm[channel];
+ return 0;
+ case hwmon_pwm_enable:
+ *val = data->pwm_enable[channel];
+ return 0;
+ case hwmon_pwm_mode:
+ *val = data->pwm_enable[channel];
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ if (attr != hwmon_intrusion_alarm || channel != 0)
+ return -EOPNOTSUPP; /* shouldn't happen */
+
+ *val = !!(data->caseopen & 0x10);
+ return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+ switch (type) {
+ case hwmon_fan:
+ return w83627ehf_do_read_fan(data, attr, channel, val);
+
+ case hwmon_in:
+ return w83627ehf_do_read_in(data, attr, channel, val);
+
+ case hwmon_pwm:
+ return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+ case hwmon_temp:
+ return w83627ehf_do_read_temp(data, attr, channel, val);
+
+ case hwmon_intrusion:
+ return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = data->temp_label[data->temp_src[channel]];
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ /* Nothing else should be read as a string */
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ if (type == hwmon_in && attr == hwmon_in_min)
+ return store_in_min(dev, data, channel, val);
+ if (type == hwmon_in && attr == hwmon_in_max)
+ return store_in_max(dev, data, channel, val);
+
+ if (type == hwmon_fan && attr == hwmon_fan_min)
+ return store_fan_min(dev, data, channel, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_max)
+ return store_temp_max(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+ return store_temp_max_hyst(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return store_temp_offset(dev, data, channel, val);
+
+ if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+ return store_pwm_mode(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+ return store_pwm_enable(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_input)
+ return store_pwm(dev, data, channel, val);
+
+ if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+ return clear_caseopen(dev, data, channel, val);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+ .is_visible = w83627ehf_is_visible,
+ .read = w83627ehf_read,
+ .read_string = w83627ehf_read_string,
+ .write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+ HWMON_CHANNEL_INFO(intrusion,
+ HWMON_INTRUSION_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+ .ops = &w83627ehf_ops,
+ .info = w83627ehf_info,
+};
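+
With the ops and channel layout bundled into w83627ehf_chip_info, registration shrinks to a single device-managed call, and the non-standard groups ride along as the last argument. Roughly what probe() below reduces to — a sketch with a hypothetical demo_probe wrapper, not additional driver code:

static int demo_probe(struct device *dev, struct w83627ehf_data *data)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, data->name,
				data, &w83627ehf_chip_info,
				w83627ehf_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);	/* teardown is device-managed */
}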
+
static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
struct resource *res;
u8 en_vrm10;
int i, err = 0;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
@@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+ /* 667HG has 3 pwms, and 627UHG has only 2 */
switch (sio_data->kind) {
default:
data->pwm_num = 4;
break;
case w83667hg:
case w83667hg_b:
- case nct6775:
- case nct6776:
data->pwm_num = 3;
break;
case w83627uhg:
@@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp = 0x07;
/* Deal with temperature register setup first. */
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- int mask = 0;
-
- /*
- * Display temperature sensor output only if it monitors
- * a source other than one already reported. Always display
- * first three temperature registers, though.
- */
- for (i = 0; i < NUM_REG_TEMP; i++) {
- u8 src;
-
- data->reg_temp[i] = NCT6775_REG_TEMP[i];
- data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
- src = w83627ehf_read_value(data,
- NCT6775_REG_TEMP_SOURCE[i]);
- src &= 0x1f;
- if (src && !(mask & (1 << src))) {
- data->have_temp |= 1 << i;
- mask |= 1 << src;
- }
-
- data->temp_src[i] = src;
-
- /*
- * Now do some register swapping if index 0..2 don't
- * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
- * Idea is to have the first three attributes
- * report SYSTIN, CPUIN, and AUXIN if possible
- * without overriding the basic system configuration.
- */
- if (i > 0 && data->temp_src[0] != 1
- && data->temp_src[i] == 1)
- w82627ehf_swap_tempreg(data, 0, i);
- if (i > 1 && data->temp_src[1] != 2
- && data->temp_src[i] == 2)
- w82627ehf_swap_tempreg(data, 1, i);
- if (i > 2 && data->temp_src[2] != 3
- && data->temp_src[i] == 3)
- w82627ehf_swap_tempreg(data, 2, i);
- }
- if (sio_data->kind == nct6776) {
- /*
- * On NCT6776, AUXTIN and VIN3 pins are shared.
- * Only way to detect it is to check if AUXTIN is used
- * as a temperature source, and if that source is
- * enabled.
- *
- * If that is the case, disable in6, which reports VIN3.
- * Otherwise disable temp3.
- */
- if (data->temp_src[2] == 3) {
- u8 reg;
-
- if (data->reg_temp_config[2])
- reg = w83627ehf_read_value(data,
- data->reg_temp_config[2]);
- else
- reg = 0; /* Assume AUXTIN is used */
-
- if (reg & 0x01)
- data->have_temp &= ~(1 << 2);
- else
- data->in6_skip = 1;
- }
- data->temp_label = nct6776_temp_label;
- } else {
- data->temp_label = nct6775_temp_label;
- }
- data->have_temp_offset = data->have_temp & 0x07;
- for (i = 0; i < 3; i++) {
- if (data->temp_src[i] > 3)
- data->have_temp_offset &= ~(1 << i);
- }
- } else if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg_b) {
u8 reg;
w83627ehf_set_temp_reg_ehf(data, 4);
@@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp_offset = data->have_temp & 0x07;
}
- if (sio_data->kind == nct6775) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg16;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
- data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
- } else if (sio_data->kind == nct6776) {
- data->has_fan_div = false;
- data->fan_from_reg = fan_from_reg13;
- data->fan_from_reg_min = fan_from_reg13;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- } else if (sio_data->kind == w83667hg_b) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+ if (sio_data->kind == w83667hg_b) {
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
goto exit_release;
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
- sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
/*
* W83667HG has different pins for VID input and output, so
* we can get the VID input values directly at logical device D
@@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
*/
superio_select(sio_data->sioreg, W83667HG_LD_VID);
data->vid = superio_inb(sio_data->sioreg, 0xe3);
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else if (sio_data->kind != w83627uhg) {
superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
@@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev)
SIO_REG_VID_DATA);
if (sio_data->kind == w83627ehf) /* 6 VID pins only */
data->vid &= 0x3f;
-
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else {
dev_info(dev,
"VID pins in output mode, CPU VID not available\n");
}
}
- if (fan_debounce &&
- (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
- u8 tmp;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
- if (sio_data->kind == nct6776)
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x3e | tmp);
- else
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x1e | tmp);
- pr_info("Enabled fan debounce for chip %s\n", data->name);
- }
-
w83627ehf_check_fan_inputs(sio_data, data);
superio_exit(sio_data->sioreg);
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
- err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
- err = device_create_file(dev, &attr->dev_attr);
- if (err)
- goto exit_remove;
- }
- }
- /* if fan3 and fan4 are enabled create the sf3 files for them */
- if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan3[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_min[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_max[i].dev_attr)))
- goto exit_remove;
- }
-
- for (i = 0; i < 5; i++) {
- if (data->has_fan & (1 << i)) {
- if ((err = device_create_file(dev,
- &sda_fan_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr)))
- goto exit_remove;
- if (sio_data->kind != nct6776) {
- err = device_create_file(dev,
- &sda_fan_div[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->has_fan_min & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_min[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i < data->pwm_num &&
- ((err = device_create_file(dev,
- &sda_pwm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_mode[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_enable[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_target_temp[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_tolerance[i].dev_attr))))
- goto exit_remove;
- }
- }
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ data->name,
+ data,
+ &w83627ehf_chip_info,
+ w83627ehf_groups);
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->temp_label) {
- err = device_create_file(dev,
- &sda_temp_label[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i == 2 && data->temp3_val_only)
- continue;
- if (data->reg_temp_over[i]) {
- err = device_create_file(dev,
- &sda_temp_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp_hyst[i]) {
- err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i > 2)
- continue;
- if ((err = device_create_file(dev,
- &sda_temp_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_type[i].dev_attr)))
- goto exit_remove;
- if (data->have_temp_offset & (1 << i)) {
- err = device_create_file(dev,
- &sda_temp_offset[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(dev, &sda_caseopen[0].dev_attr);
- if (err)
- goto exit_remove;
-
- if (sio_data->kind == nct6776) {
- err = device_create_file(dev, &sda_caseopen[1].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- err = device_create_file(dev, &dev_attr_name);
- if (err)
- goto exit_remove;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ return PTR_ERR_OR_ZERO(hwmon_dev);
- return 0;
-
-exit_remove:
- w83627ehf_device_remove_files(dev);
exit_release:
release_region(res->start, IOREGION_LENGTH);
exit:
@@ -2588,8 +1959,6 @@ static int w83627ehf_remove(struct platform_device *pdev)
{
struct w83627ehf_data *data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
- w83627ehf_device_remove_files(&pdev->dev);
release_region(data->addr, IOREGION_LENGTH);
return 0;
@@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev)
static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
mutex_lock(&data->update_lock);
data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
- if (sio_data->kind == nct6775) {
- data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- }
mutex_unlock(&data->update_lock);
return 0;
@@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev)
static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev)
if (!(data->has_fan_min & (1 << i)))
continue;
- w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
data->fan_min[i]);
}
@@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev)
/* Restore other settings */
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
- if (sio_data->kind == nct6775) {
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
- }
/* Force re-reading all values */
data->valid = 0;
@@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
static const char sio_name_W83667HG[] __initconst = "W83667HG";
static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
- static const char sio_name_NCT6775[] __initconst = "NCT6775F";
- static const char sio_name_NCT6776[] __initconst = "NCT6776F";
u16 val;
const char *sio_name;
@@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
- case SIO_NCT6775_ID:
- sio_data->kind = nct6775;
- sio_name = sio_name_NCT6775;
- break;
- case SIO_NCT6776_ID:
- sio_data->kind = nct6776;
- sio_name = sio_name_NCT6776;
- break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index dc3f507e7562..a90d757f7043 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
-#ifdef CONFIG_CPU_PM
static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
@@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = {
static int etm4_cpu_pm_register(void)
{
- return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+ return 0;
}
static void etm4_cpu_pm_unregister(void)
{
- cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
}
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
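
The coresight change trades the #ifdef CONFIG_CPU_PM stub pair for IS_ENABLED() tests, so both branches are always compiled (catching bitrot in untested configs) while the dead one is constant-folded away. A generic sketch with a hypothetical CONFIG_FOO and stand-in do_register():

#include <linux/kconfig.h>

/* Stand-in for the real registration call (hypothetical). */
static int do_register(void) { return 0; }

/*
 * One definition replaces the old #ifdef CONFIG_FOO / #else stub pair:
 * both branches always compile, and the dead one is folded away.
 */
static int foo_register(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		return do_register();
	return 0;
}
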
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ff340d7ae2e5..abfe3094c047 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
if (unlikely(!dev))
return -ENOMEM;
- dev->base = ioremap_nocache(res->start, resource_size(res));
+ dev->base = ioremap(res->start, resource_size(res));
if (unlikely(!dev->base)) {
ret = -ENXIO;
goto err;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 38556381f4ca..2f8b8050a223 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev)
adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
"scl",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_scl))
- return PTR_ERR(adapter_data->gpio_scl);
+ if (IS_ERR(adapter_data->gpio_scl)) {
+ ret = PTR_ERR(adapter_data->gpio_scl);
+ goto free_both;
+ }
adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
"sda",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_sda))
- return PTR_ERR(adapter_data->gpio_sda);
+ if (IS_ERR(adapter_data->gpio_sda)) {
+ ret = PTR_ERR(adapter_data->gpio_sda);
+ goto free_both;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
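
The iop3xx fix converts early returns into goto-based unwinding, since returning directly from the middle of probe leaked resources acquired before the failing devm_gpiod_get_optional() call. A generic sketch of the pattern, with hypothetical acquire/release helpers standing in for the driver's real setup steps:

    static int example_probe(struct device *dev)
    {
            int ret;

            ret = acquire_first(dev);       /* hypothetical */
            if (ret)
                    return ret;             /* nothing to undo yet */

            ret = acquire_second(dev);      /* hypothetical */
            if (ret)
                    goto err_release_first; /* undo what already succeeded */

            return 0;

    err_release_first:
            release_first(dev);
            return ret;
    }
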
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 0829cb696d9d..4fde74eb34a7 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -281,7 +281,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
}
/* remap the memory */
- pmcmsptwi_data.iobase = ioremap_nocache(res->start,
+ pmcmsptwi_data.iobase = ioremap(res->start,
resource_size(res));
if (!pmcmsptwi_data.iobase) {
dev_err(&pldev->dev,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a98bf31d0e5c..61339c665ebd 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
+ if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_i2c_runtime_resume(&pdev->dev);
- else
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ } else {
ret = pm_runtime_get_sync(i2c_dev->dev);
-
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto disable_rpm;
+ }
}
if (i2c_dev->is_multimaster_mode) {
@@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto disable_rpm;
+ goto put_rpm;
}
}
@@ -1671,11 +1675,16 @@ disable_div_clk:
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_put_sync(&pdev->dev);
+ else
tegra_i2c_runtime_suspend(&pdev->dev);
+disable_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev)
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int err;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
+ err = pm_runtime_force_suspend(dev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
if (err)
return err;
+ err = pm_runtime_force_resume(dev);
+ if (err < 0)
+ return err;
+
i2c_mark_adapter_resumed(&i2c_dev->adapter);
return 0;
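
The tegra system-sleep hooks now route through pm_runtime_force_suspend()/pm_runtime_force_resume(), which drive the device through its runtime-PM callbacks across system suspend and restore its runtime-PM state afterwards. A minimal sketch of the resulting shape, assuming a driver whose runtime-PM callbacks already do the clock and register save/restore work:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int example_suspend(struct device *dev)
    {
            return pm_runtime_force_suspend(dev);
    }

    static int example_resume(struct device *dev)
    {
            return pm_runtime_force_resume(dev);
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };
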
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 043691656245..7f8f896fa0c3 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
- unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b0ff0e12d84c..bd26c3b9634e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
master->regs +
@@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
static int dw_i3c_probe(struct platform_device *pdev)
{
struct dw_i3c_master *master;
- struct resource *res;
int ret, irq;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
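
The dw-i3c reattach logic above tracks device-address-table slots with a free_pos bitmask, where BIT(n) set means slot n is free: moving a device clears its old table entry, releases the old bit, and claims the lower slot. A toy sketch of that bookkeeping, with an illustrative slot count:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define NUM_SLOTS   8   /* illustrative size */

    /* BIT(n) set => slot n is free. */
    static u32 free_pos = GENMASK(NUM_SLOTS - 1, 0);

    static int get_free_slot(void)
    {
            return free_pos ? ffs(free_pos) - 1 : -ENOSPC;
    }

    static void move_slot(int old, int new)
    {
            free_pos |= BIT(old);   /* release the old slot */
            free_pos &= ~BIT(new);  /* claim the lower one */
    }
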
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 10db0bf0655a..54712793709e 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -60,6 +61,7 @@
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
@@ -70,6 +72,7 @@
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
@@ -388,6 +391,10 @@ struct cdns_i3c_xfer {
struct cdns_i3c_cmd cmds[0];
};
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
struct cdns_i3c_master {
struct work_struct hj_work;
struct i3c_master_controller base;
@@ -408,6 +415,7 @@ struct cdns_i3c_master {
struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
@@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
return 0;
}
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+ /* Values greater than THD_DELAY_MAX (3) are not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+ /* The CTRL_THD_DELAY field is encoded: 0 selects the maximum delay. */
+ return (THD_DELAY_MAX - thd_delay);
+}
+
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
@@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
* We will issue ENTDAA afterwards from the threaded IRQ handler.
*/
ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+ * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing on
+ * the master output. This setting allows that timing to be met on the
+ * master's SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
writel(ctrl, master->regs + CTRL);
cdns_i3c_master_enable(master);
@@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work)
i3c_master_do_daa(&master->base);
}
+static struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
- { .compatible = "cdns,i3c-master" },
- { /* sentinel */ },
-};
-
static struct platform_driver cdns_i3c_master = {
.probe = cdns_i3c_master_probe,
.remove = cdns_i3c_master_remove,
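
cdns_i3c_master_calculate_thd_delay() converts the per-device minimum data-hold time into sysclk cycles, clamps it to the 2-bit field, and inverts the encoding. For example, with thd_delay_ns = 10 and a 166 MHz sysclk (integer period 1000000000 / 166000000 = 6 ns), DIV_ROUND_UP(10, 6) = 2 cycles, within THD_DELAY_MAX = 3, encoding as 3 - 2 = 1. A standalone restatement of the same arithmetic:

    #include <linux/kernel.h>   /* DIV_ROUND_UP */
    #include <linux/time64.h>   /* NSEC_PER_SEC */

    #define THD_DELAY_MAX   3

    static u8 encode_thd_delay(u32 thd_delay_ns, unsigned long sysclk_rate)
    {
            u8 cycles = DIV_ROUND_UP(thd_delay_ns, NSEC_PER_SEC / sysclk_rate);

            if (cycles > THD_DELAY_MAX)
                    cycles = THD_DELAY_MAX;

            return THD_DELAY_MAX - cycles;  /* 0 encodes the longest delay */
    }
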
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 75fd2a7b0842..7833e650789f 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -41,6 +41,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
@@ -79,6 +80,7 @@ struct idle_cpu {
unsigned long auto_demotion_disable_flags;
bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
+ bool use_acpi;
};
static const struct idle_cpu *icpu;
@@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
@@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
@@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
@@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(bool on)
-{
- if (on)
- tick_broadcast_enable();
- else
- tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
- msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_nhx = {
+ .state_table = nehalem_cstates,
+ .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
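
The new *_x idle_cpu variants added from here on (idle_cpu_nhx, idle_cpu_snx, and so on) differ from their base tables only in .use_acpi = true. Its effect, restated as a sketch from the driver-init hunk later in this patch, is that a state not listed by ACPI _CST starts disabled unless explicitly flagged always-enable:

    /* Sketch of the gating added later in this patch. */
    if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
        !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
            state->flags |= CPUIDLE_FLAG_OFF;
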
@@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_snx = {
+ .state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_byt = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
@@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = {
static const struct idle_cpu idle_cpu_ivt = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_hsw = {
@@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_hsx = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_bdw = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_bdx = {
+ .state_table = bdw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_skl = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
@@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = {
static const struct idle_cpu idle_cpu_skx = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_bxt = {
@@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = {
static const struct idle_cpu idle_cpu_dnv = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem),
- INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
+ INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
- INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
@@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
+ INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
@@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
{}
};
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+ INTEL_CPU_FAM6_MWAIT,
+ {}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+ if (cstate + 1 > max_cstate) {
+ pr_info("max_cstate %d reached\n", max_cstate);
+ return true;
+ }
+ return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
*/
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
{
- unsigned int eax, ebx, ecx;
- const struct x86_cpu_id *id;
+ int cstate, limit;
- if (max_cstate == 0) {
- pr_debug("disabled\n");
- return -EPERM;
- }
+ limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+ acpi_state_table.count);
- id = x86_match_cpu(intel_idle_ids);
- if (!id) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6)
- pr_debug("does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return -ENODEV;
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ return false;
}
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
- pr_debug("Please enable MWAIT in BIOS SETUP\n");
- return -ENODEV;
+ return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+ unsigned int cpu;
+
+ if (no_acpi) {
+ pr_debug("Not allowed to use ACPI _CST\n");
+ return false;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
+ for_each_possible_cpu(cpu) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ if (!pr)
+ continue;
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
- return -ENODEV;
+ if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+ continue;
- pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+ acpi_state_table.count++;
- icpu = (const struct idle_cpu *)id->driver_data;
- cpuidle_state_table = icpu->state_table;
+ if (!intel_idle_cst_usable())
+ continue;
- pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
- boot_cpu_data.x86_model);
+ if (!acpi_processor_claim_cst_control()) {
+ acpi_state_table.count = 0;
+ return false;
+ }
- return 0;
+ return true;
+ }
+
+ pr_debug("ACPI _CST not found or not usable\n");
+ return false;
}
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
- int i;
- struct cpuidle_device *dev;
+ int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
+ if (intel_idle_max_cstate_reached(cstate))
+ break;
+
+ cx = &acpi_state_table.states[cstate];
+
+ state = &drv->states[drv->state_count++];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = cx->latency;
+ /*
+ * For C1-type C-states use the same number for both the exit
+ * latency and target residency, because that is the case for
+ * C1 in the majority of the static C-states tables above.
+ * For the other types of C-states, however, set the target
+ * residency to 3 times the exit latency which should lead to
+ * a reasonable balance between energy-efficiency and
+ * performance in the majority of interesting cases.
+ */
+ state->target_residency = cx->latency;
+ if (cx->type > ACPI_STATE_C1)
+ state->target_residency *= 3;
+
+ state->flags = MWAIT2flg(cx->address);
+ if (cx->type > ACPI_STATE_C2)
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+ state->enter = intel_idle;
+ state->enter_s2idle = intel_idle_s2idle;
}
}
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
+{
+ int cstate, limit;
+
+ /*
+ * If there are no _CST C-states, do not disable any C-states by
+ * default.
+ */
+ if (!acpi_state_table.count)
+ return false;
+
+ limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ if (acpi_state_table.states[cstate].address == mwait_hint)
+ return false;
+ }
+ return true;
+}
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+
/*
* ivt_idle_state_table_update(void)
*
* Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
int cpu, package_num, num_sockets = 1;
@@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void)
/* else, 1 and 2 socket systems use default ivt_cstates */
}
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
*/
-
-static unsigned int irtl_ns_units[] = {
- 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
+ static const unsigned int irtl_ns_units[] __initconst = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+ };
unsigned long long ns;
if (!irtl)
@@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
ns = irtl_ns_units[(irtl >> 10) & 0x7];
- return div64_u64((irtl & 0x3FF) * ns, 1000);
+ return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
+
/*
* bxt_idle_state_table_update(void)
*
* On BXT, we trust the IRTL to show the definitive maximum latency
* We use the same value for target_residency.
*/
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int usec;
@@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void)
* On SKL-H (model 0x5e) disable C8 and C9 if:
* C10 is enabled and SGX disabled
*/
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int eax, ebx, ecx, edx;
@@ -1284,7 +1392,7 @@ static void sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+ rdmsrl(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void)
skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
- switch (boot_cpu_data.x86_model) {
+ unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+ unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+ MWAIT_SUBSTATE_MASK;
+
+ /* Ignore the C-state if there are NO sub-states in CPUID for it. */
+ if (num_substates == 0)
+ return false;
+
+ if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle states deeper than C2");
+ return true;
+}
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+ int cstate;
+
+ switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
@@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void)
sklh_idle_state_table_update();
break;
}
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
- int cstate;
- struct cpuidle_driver *drv = &intel_idle_driver;
-
- intel_idle_state_table_update();
-
- cpuidle_poll_state_init(drv);
- drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate;
+ unsigned int mwait_hint;
- if ((cpuidle_state_table[cstate].enter == NULL) &&
- (cpuidle_state_table[cstate].enter_s2idle == NULL))
+ if (intel_idle_max_cstate_reached(cstate))
break;
- if (cstate + 1 > max_cstate) {
- pr_info("max_cstate %d reached\n", max_cstate);
+ if (!cpuidle_state_table[cstate].enter &&
+ !cpuidle_state_table[cstate].enter_s2idle)
break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
- /* number of sub-states for this state in CPUID.MWAIT */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
-
- /* if NO sub-states for this state in CPUID, skip it */
- if (num_substates == 0)
- continue;
- /* if state marked as disabled, skip it */
+ /* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ if (!intel_idle_verify_cstate(mwait_hint))
+ continue;
- if (((mwait_cstate + 1) > 2) &&
- !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halts in idle"
- " states deeper than C2");
+ /* Structure copy. */
+ drv->states[drv->state_count] = cpuidle_state_table[cstate];
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[cstate];
+ if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
+ !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+ drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
- drv->state_count += 1;
+ drv->state_count++;
}
if (icpu->byt_auto_demotion_disable_flag) {
@@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void)
}
}
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ cpuidle_poll_state_init(drv);
+ drv->state_count = 1;
+
+ if (icpu)
+ intel_idle_init_cstates_icpu(drv);
+ else
+ intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
/*
* intel_idle_cpu_init()
@@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
return -EIO;
}
+ if (!icpu)
+ return 0;
+
if (icpu->auto_demotion_disable_flags)
auto_demotion_disable();
@@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
struct cpuidle_device *dev;
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- __setup_broadcast_timer(true);
+ tick_broadcast_enable();
/*
* Some systems can hotplug a cpu at runtime after
@@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu)
return 0;
}
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
static int __init intel_idle_init(void)
{
+ const struct x86_cpu_id *id;
+ unsigned int eax, ebx, ecx;
int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- retval = intel_idle_probe();
- if (retval)
- return retval;
+ if (max_cstate == 0) {
+ pr_debug("disabled\n");
+ return -EPERM;
+ }
+
+ id = x86_match_cpu(intel_idle_ids);
+ if (id) {
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+ } else {
+ id = x86_match_cpu(intel_mwait_ids);
+ if (!id)
+ return -ENODEV;
+ }
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
+ return -ENODEV;
+
+ pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+ icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu) {
+ cpuidle_state_table = icpu->state_table;
+ if (icpu->use_acpi)
+ intel_idle_acpi_cst_extract();
+ } else if (!intel_idle_acpi_cst_extract()) {
+ return -ENODEV;
+ }
+
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
+ if (!intel_idle_cpuidle_devices)
return -ENOMEM;
- intel_idle_cpuidle_driver_init();
+ intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
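
irtl_2_usec() decodes the IRTL MSR format: bits 12:10 select a time unit from the ns-units table and bits 9:0 are the multiplier. For example, an IRTL value of 0x883 selects unit index 2 (1024 ns) with multiplier 0x083 (131), giving 131 * 1024 / 1000, roughly 134 us. A standalone restatement of the conversion:

    #include <linux/math64.h>   /* div_u64 */
    #include <linux/time64.h>   /* NSEC_PER_USEC */

    static unsigned long long irtl_to_usec_example(unsigned long long irtl)
    {
            static const unsigned int units_ns[] = {
                    1, 32, 1024, 32768, 1048576, 33554432, 0, 0
            };
            unsigned long long ns = units_ns[(irtl >> 10) & 0x7];

            /* Unit indices 6 and 7 are reserved and decode to zero. */
            if (!irtl || !ns)
                    return 0;

            return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
    }
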
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3f03abf100b5..306bf15023a7 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -494,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
st->channel_config[channel].buf_negative =
of_property_read_bool(child, "adi,buffered-negative");
- *chan = ad7124_channel_template;
- chan->address = channel;
- chan->scan_index = channel;
- chan->channel = ain[0];
- chan->channel2 = ain[1];
-
- chan++;
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+ chan[channel].scan_index = channel;
+ chan[channel].channel = ain[0];
+ chan[channel].channel2 = ain[1];
}
return 0;
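
The ad7124 fix indexes the channel template array by the channel number parsed from each child node instead of advancing a pointer in iteration order, so a device tree that lists channels out of order still lands every configuration in the right slot. The two shapes, side by side as a sketch:

    /* Fragile: relies on DT children appearing in channel order. */
    *chan = ad7124_channel_template;
    chan->channel = ain[0];
    chan++;

    /* Robust: place by the parsed channel index, so an out-of-order
     * "reg" property still fills the correct slot. */
    chan[channel] = ad7124_channel_template;
    chan[channel].channel = ain[0];
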
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index fa4586037bb8..0b91de4df8f4 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -65,6 +65,7 @@ config IAQCORE
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index a7d40c02ce6b..b921dd9e108f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
- if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+ if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+ id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
break;
}
if (j < ST_LSM6DSX_MAX_ID)
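
A plausible reading of the st_lsm6dsx guard: unused id[] slots in a designated-initializer array are zero-filled, so an entry with .hw_id == 0 must not count as a match; checking .name first skips those empty slots, as annotated in this sketch:

    /* Skip zero-filled holes in the settings table before comparing
     * the probed WHOAMI value against .hw_id. */
    if (st_lsm6dsx_sensor_settings[i].id[j].name &&
        id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
            break;
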
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c193d64e5217..112225c0e486 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
unsigned bytes = 0;
- int length, i;
+ int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
@@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
+
+ bytes = ALIGN(bytes, largest);
return bytes;
}
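
The iio_compute_scan_bytes() change pads the total scan size up to the alignment of its largest element, so that when scans are laid back-to-back in the buffer, every element of the second and later scans stays naturally aligned. Worked example as a sketch: a u32 channel followed by a u16 channel packs to 6 bytes, but the next scan's u32 would then start at offset 6, so the total is padded to 8:

    #include <linux/kernel.h>   /* ALIGN, max */

    static unsigned int scan_bytes_example(void)
    {
            unsigned int bytes = 0, largest = 0;

            bytes = ALIGN(bytes, 4) + 4;    /* u32 at offset 0 */
            largest = max(largest, 4U);

            bytes = ALIGN(bytes, 2) + 2;    /* u16 at offset 4 -> 6 bytes */
            largest = max(largest, 2U);

            /* Pad 6 up to 8 so the next scan's u32 lands 4-byte aligned. */
            return ALIGN(bytes, largest);
    }
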
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 16dacea9eadf..b0e241aaefb4 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
if (ret < 0)
return ret;
- data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
@@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
/* show 54ms in total. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Integration time is 80ms, add 10ms. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+ data->al_scale = 120000;
break;
}
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7a3b99597ead..146f98fbf22b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -181,15 +181,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
- * @udata: userspace context to pin memory for
+ * @device: IB device to associate the umem with
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
*/
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access)
{
- struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
unsigned long lock_limit;
@@ -201,14 +200,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -226,7 +217,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
umem = kzalloc(sizeof(*umem), GFP_KERNEL);
if (!umem)
return ERR_PTR(-ENOMEM);
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->length = size;
umem->address = addr;
umem->writable = ib_access_writable(access);
@@ -281,7 +272,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
npages -= ret;
sg = ib_umem_add_sg_table(sg, page_list, ret,
- dma_get_max_seg_size(context->device->dma_device),
+ dma_get_max_seg_size(device->dma_device),
&umem->sg_nents);
up_read(&mm->mmap_sem);
@@ -289,10 +280,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg(context->device,
- umem->sg_head.sgl,
- umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ umem->nmap = ib_dma_map_sg(device,
+ umem->sg_head.sgl,
+ umem->sg_nents,
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
@@ -303,7 +294,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
goto out;
umem_release:
- __ib_umem_release(context->device, umem, 0);
+ __ib_umem_release(device, umem, 0);
vma:
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
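
The ib_umem_get() rework drops the ib_udata plumbing, which existed only to dig the ib_device out of the attached ucontext, and takes the device directly; this also lets kernel ULPs without udata pin memory. Callers across the tree change shape as in this hedged sketch (most converted drivers pass pd->device):

    /* Before: device recovered indirectly from udata's ucontext. */
    umem = ib_umem_get(udata, start, length, access_flags);

    /* After: the caller names the device explicitly. */
    umem = ib_umem_get(pd->device, start, length, access_flags);
    if (IS_ERR(umem))
            return PTR_ERR(umem);
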
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e42d44e501fd..dac3fd2ebc26 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -110,15 +110,12 @@ out_page_list:
* They exist only to hold the per_mm reference to help the driver create
* children umems.
*
- * @udata: udata from the syscall being used to create the umem
+ * @device: IB device to create the implicit umem on
* @access: ib_reg_mr access flags
*/
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access)
{
- struct ib_ucontext *context =
- container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
int ret;
@@ -126,14 +123,11 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
- if (!context)
- return ERR_PTR(-EIO);
-
umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
if (!umem_odp)
return ERR_PTR(-ENOMEM);
umem = &umem_odp->umem;
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
umem_odp->is_implicit_odp = 1;
@@ -201,7 +195,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
/**
* ib_umem_odp_get - Create a umem_odp for a userspace va
*
- * @udata: userspace context to pin memory for
+ * @device: IB device the umem will belong to
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
@@ -210,23 +204,14 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
* pinning, instead, stores the mm for future page fault handling in
* conjunction with MMU notifiers.
*/
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access,
+struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
+ unsigned long addr, size_t size, int access,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct ib_ucontext *context;
struct mm_struct *mm;
int ret;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
return ERR_PTR(-EINVAL);
@@ -234,7 +219,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
if (!umem_odp)
return ERR_PTR(-ENOMEM);
- umem_odp->umem.ibdev = context->device;
+ umem_odp->umem.ibdev = device;
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index dd765e176cdd..d33bdc9b01cd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1990,6 +1990,47 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags)
+{
+ struct ib_mr *mr;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ if (!(pd->device->attrs.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
+ access_flags, NULL);
+
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->res.type = RDMA_RESTRACK_MR;
+ rdma_restrack_kadd(&mr->res);
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_reg_user_mr);
+
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
+{
+ if (!pd->device->ops.advise_mr)
+ return -EOPNOTSUPP;
+
+ return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
+ NULL);
+}
+EXPORT_SYMBOL(ib_advise_mr);
+
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
struct ib_pd *pd = mr->pd;
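
ib_reg_user_mr() and ib_advise_mr() give in-kernel ULPs entry points that previously existed only on the uverbs path; both pass a NULL udata through to the driver ops. A hedged sketch of a kernel caller, with PD setup and the memory region parameters elided:

    struct ib_mr *mr;

    mr = ib_reg_user_mr(pd, start, length, virt_addr,
                        IB_ACCESS_LOCAL_WRITE);
    if (IS_ERR(mr))
            return PTR_ERR(mr);

    /* ... post work requests using mr->lkey / mr->rkey ... */

    ib_dereg_mr(mr);
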
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ad5112a2325f..52b6a4d85460 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -837,7 +837,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -850,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1304,7 +1305,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2545,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto fail;
}
- cq->umem = ib_umem_get(udata, req.cq_va,
+ cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
entries * sizeof(struct cq_base),
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
@@ -3514,7 +3516,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 4d07d22bfa7b..020f70e6865e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
goto fail;
}
/* Unconditionally map 8 bytes to support 57500 series */
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
+ nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
if (!nq->bar_reg_iomem) {
rc = -ENOMEM;
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5cdfa84faf85..1291b12287a5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
if (!res_base)
return -ENOMEM;
- rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
+ rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
@@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
"CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
/* Unconditionally map 8 bytes to support 57500 series */
- rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
+ rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
8);
if (!rcfw->creq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bdbde8e22420..60ea1b924b67 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
return -ENOMEM;
}
- dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
+ dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index fe3a7e8561df..962dc97a8ff2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc);
+ mhp->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 50c22575aed6..4822f5fa12be 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1358,7 +1358,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
int inline_size;
int err;
- if (udata->inlen &&
+ if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
@@ -1384,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61362bd6d3ce..1a6268d61977 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -EINVAL;
}
- dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+ dd->kregbase1 = ioremap(addr, RCV_ARRAY);
if (!dd->kregbase1) {
dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
return -ENOMEM;
@@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
- dd->kregbase2 = ioremap_nocache(
+ dd->kregbase2 = ioremap(
addr + dd->base2_start,
TXE_PIO_SEND - dd->base2_start);
if (!dd->kregbase2) {
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
index 343fb9894a82..985ffa9cc958 100644
--- a/drivers/infiniband/hw/hfi1/trace_tid.h
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */
TP_ARGS(dd, index, type, pa, order),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd)
- __field(unsigned long, pa);
- __field(u32, index);
- __field(u32, type);
- __field(u16, order);
+ __field(unsigned long, pa)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u16, order)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd);
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 09eb0c9ada00..769e5e4710c6 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo,
TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
TP_ARGS(dd, ctxt, subctxt, i),
TP_STRUCT__entry(
- DD_DEV_ENTRY(dd);
+ DD_DEV_ENTRY(dd)
__field(u16, ctxt)
__field(u8, subctxt)
__field(u8, ver_opcode)
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index af1d8823b3f0..a2d1e5331bf1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
u32 npages;
int ret;
- *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 10af6958ab69..bff6abdccfb0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, page_addr,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9ad19170c3f9..3ff610549c74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
+ mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index a6565b674801..eb2ee6a581aa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -744,7 +744,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_alloc_rq_inline_buf;
}
- hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+ hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 7113ebfdb4f0..c6d5f06f9cde 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
+ srq->umem =
+ ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
@@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
goto err_user_srq_mtt;
/* config index queue BA */
- srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+ srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index dbd96d029d8b..c335de91508f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1756,12 +1756,15 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
int ret;
int pg_shift;
+ if (!udata)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (iwdev->closing)
return ERR_PTR(-ENODEV);
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc);
+ region = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 306b21281fa2..a57033d4b0e5 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int shift;
int n;
- *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
+ *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 714f9df5bf39..d41f03ccb0e1 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index dfa17bcdcdbc..b0121c90c561 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,7 +367,7 @@ end:
return block_shift;
}
-static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
u64 length, int access_flags)
{
/*
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags);
+ return ib_umem_get(device, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
+ mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -504,7 +504,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = mlx4_get_umem_mr(udata, start, length,
+ mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 85f57b76e446..cb23cdb9389a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
+ qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
+ qp->umem =
+ ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8dcf6e3d9ae2..8f9d5035142d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem =
+ ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dd8d24ee8e1d..367a71bc5f4b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -708,8 +708,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*cqe_size = ucmd.cqe_size;
cq->buf.umem =
- ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE);
+ ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1108,7 +1108,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
return -EINVAL;
- umem = ib_umem_get(udata, ucmd.buf_addr,
+ umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9d0a18cf9e5e..685b8ed96b4e 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2134,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 12737c509aa2..61475b571531 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 997cbfe4b90c..ab4ec33409b8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -815,6 +815,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
{
+ size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
@@ -828,12 +829,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
u64 max_tso;
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
- if (uhw->outlen && uhw->outlen < resp_len)
+ if (uhw_outlen && uhw_outlen < resp_len)
return -EINVAL;
resp.response_length = resp_len;
- if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
+ if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -897,7 +898,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->raw_packet_caps |=
IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
- if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
if (max_tso) {
resp.tso_caps.max_tso = 1 << max_tso;
@@ -907,7 +908,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
resp.rss_caps.rx_hash_function =
MLX5_RX_HASH_FUNC_TOEPLITZ;
resp.rss_caps.rx_hash_fields_mask =
@@ -927,9 +928,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.rss_caps);
}
} else {
- if (field_avail(typeof(resp), tso_caps, uhw->outlen))
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen))
resp.response_length += sizeof(resp.tso_caps);
- if (field_avail(typeof(resp), rss_caps, uhw->outlen))
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen))
resp.response_length += sizeof(resp.rss_caps);
}
@@ -1014,6 +1015,23 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
+ if (!uhw) {
+ /* ODP for kernel QPs is not implemented for receive
+ * WQEs and SRQ WQEs
+ */
+ props->odp_caps.per_transport_caps.rc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.uc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.ud_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.xrc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ }
}
if (MLX5_CAP_GEN(mdev, cd))
@@ -1054,7 +1072,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MAX_CQ_PERIOD;
}
- if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.cqe_comp_caps);
if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
@@ -1072,7 +1090,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
@@ -1091,7 +1109,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw->outlen)) {
+ uhw_outlen)) {
if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_IB_ALLOW_MPW;
@@ -1104,7 +1122,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ if (field_avail(typeof(resp), flags, uhw_outlen)) {
resp.response_length += sizeof(resp.flags);
if (MLX5_CAP_GEN(mdev, cqe_compression_128))
@@ -1120,8 +1138,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
- if (field_avail(typeof(resp), sw_parsing_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.sw_parsing_caps);
if (MLX5_CAP_ETH(mdev, swp)) {
resp.sw_parsing_caps.sw_parsing_offloads |=
@@ -1141,7 +1158,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
@@ -1164,8 +1181,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), tunnel_offloads_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.tunnel_offloads_caps);
if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
resp.tunnel_offloads_caps |=
@@ -1186,7 +1202,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
- if (uhw->outlen) {
+ if (uhw_outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -3276,12 +3292,14 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
int num_entries, int num_groups,
u32 flags)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, flags);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.flags = flags;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -4771,7 +4789,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
@@ -4781,7 +4798,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!dprops)
goto out;
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
+ err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
goto out;
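With uhw_outlen computed as (uhw) ? uhw->outlen : 0, in-kernel callers such as __get_port_caps() can pass a NULL udata instead of faking an empty struct ib_udata on the stack. A hedged sketch of the pattern; MIN_RESP_LEN and resp are illustrative stand-ins for the driver's real response handling:

	static int query_sketch(struct ib_udata *uhw)
	{
		size_t uhw_outlen = uhw ? uhw->outlen : 0;

		if (uhw_outlen && uhw_outlen < MIN_RESP_LEN)
			return -EINVAL;
		if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
			return -EINVAL;
		/* ... fill props; copy back only when userspace asked ... */
		return uhw_outlen ?
			ib_copy_to_udata(uhw, &resp, resp.response_length) : 0;
	}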
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b06f32ff5748..77d495b2032d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1153,12 +1153,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
- void *buffer, int buflen, size_t *bc);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ea8bfc3e2d8d..44a0ee6bd9f1 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -737,10 +737,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- u64 start, u64 length, int access_flags,
- struct ib_umem **umem, int *npages, int *page_shift,
- int *ncont, int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem, int *npages,
+ int *page_shift, int *ncont, int *order)
{
struct ib_umem *u;
@@ -749,7 +748,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags,
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
&mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
@@ -765,7 +764,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags);
+ u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1247,6 +1246,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
length == U64_MAX) {
+ if (virt_addr != start)
+ return ERR_PTR(-EINVAL);
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
@@ -1257,7 +1258,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
}
- err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+ err = mr_umem_get(dev, start, length, access_flags, &umem,
&npages, &page_shift, &ncont, &order);
if (err < 0)
@@ -1424,9 +1425,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = NULL;
- err = mr_umem_get(dev, udata, addr, len, access_flags,
- &mr->umem, &npages, &page_shift, &ncont,
- &order);
+ err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
if (err)
goto err;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f924250f80c2..0afb0042bd53 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -497,7 +497,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct mlx5_ib_mr *imr;
int err;
- umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
+ umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
@@ -624,11 +624,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
unsigned long current_seq;
u64 access_mask;
- u64 start_idx, page_mask;
+ u64 start_idx;
page_shift = odp->page_shift;
- page_mask = ~(BIT(page_shift) - 1);
- start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
@@ -767,11 +766,19 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ if (unlikely(io_virt < mr->mmkey.iova))
+ return -EFAULT;
+
if (!odp->is_implicit_odp) {
- if (unlikely(io_virt < ib_umem_start(odp) ||
- ib_umem_end(odp) - io_virt < bcnt))
+ u64 user_va;
+
+ if (check_add_overflow(io_virt - mr->mmkey.iova,
+ (u64)odp->umem.address, &user_va))
+ return -EFAULT;
+ if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
return -EFAULT;
- return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
flags);
}
return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
@@ -1237,15 +1244,15 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
wqe = wqe_start;
qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
if (qp && sq) {
- ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_initiator_pfault_handler(
dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
} else if (qp && !sq) {
- ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_rq(
@@ -1253,8 +1260,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
} else if (!qp) {
struct mlx5_ib_srq *srq = res_to_srq(res);
- ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_srq(
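pagefault_mr() now translates the faulting I/O virtual address into the umem's user VA space with an explicit overflow check, rather than masking against the iova. A standalone sketch of the translation; the helper name is illustrative:

	static int io_virt_to_user_va(u64 io_virt, u64 iova, u64 umem_address,
				      u64 *user_va)
	{
		if (io_virt < iova)
			return -EFAULT;		/* fault below the MR */
		if (check_add_overflow(io_virt - iova, umem_address, user_va))
			return -EFAULT;		/* offset + base wrapped */
		return 0;
	}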
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7e51870e9e01..ae7cbd9c9bca 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -129,14 +129,10 @@ static int is_sqp(enum ib_qp_type qp_type)
*
* Return: zero on success, or an error code.
*/
-static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
- void *buffer,
- u32 buflen,
- int wqe_index,
- int wq_offset,
- int wq_wqe_cnt,
- int wq_wqe_shift,
- int bcnt,
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+ size_t buflen, int wqe_index,
+ int wq_offset, int wq_wqe_cnt,
+ int wq_wqe_shift, int bcnt,
size_t *bytes_copied)
{
size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
@@ -160,11 +156,43 @@ static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
return 0;
}
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied = 0;
+ size_t wqe_length;
+ void *p;
+ int ds;
+
+ wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+ /* read the control segment first */
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ ctrl = p;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* read the rest of the WQE if it spans more than one stride */
+ while (bytes_copied < wqe_length) {
+ size_t copy_length =
+ min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+ if (!copy_length)
+ break;
+
+ memcpy(buffer + bytes_copied, p, copy_length);
+ bytes_copied += copy_length;
+
+ wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ }
+ *bc = bytes_copied;
+ return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -176,18 +204,10 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
int ret;
int ds;
- if (buflen < sizeof(*ctrl))
- return -EINVAL;
-
/* first, read as much as possible */
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
return ret;
@@ -210,13 +230,9 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
* so read the remaining bytes starting
* from wqe_index 0
*/
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer + bytes_copied,
- buflen - bytes_copied,
- 0,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+ buflen - bytes_copied, 0, wq->offset,
+ wq->wqe_cnt, wq->wqe_shift,
wqe_length - bytes_copied,
&bytes_copied2);
@@ -226,11 +242,24 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+
+ if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (!umem)
+ return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+ buflen, bc);
+
+ return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -238,14 +267,9 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
@@ -254,25 +278,33 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t wqe_size = 1 << wq->wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct ib_umem *umem = srq->umem;
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- 0,
- srq->msrq.max,
- srq->msrq.wqe_shift,
- buflen,
- &bytes_copied);
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+ srq->msrq.max, srq->msrq.wqe_shift,
+ buflen, &bytes_copied);
if (ret)
return ret;
@@ -280,6 +312,21 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
return 0;
}
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
@@ -749,7 +796,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0);
+ *umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +853,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
+ rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
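The exported entry points are now thin wrappers that validate buflen once and then dispatch on whether the QP has a user buffer (umem) or a kernel fragment buffer. An illustrative caller, matching how the ODP pagefault handler uses them:

	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, &bytes_copied);
	if (ret)
		return ret;	/* -EINVAL if the buffer cannot hold a ctrl segment */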
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 62939df3c692..b1a8a9175040 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 33002530fee7..ac19d57803b5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc);
+ mr->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 9bc1ca6f6f9e..d47ea675734b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -869,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4cd292966aa9..920f35e28cfc 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -772,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
+ q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -1415,9 +1415,8 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
if (rc)
return rc;
- srq->prod_umem =
- ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access);
+ srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -2839,7 +2838,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index dd4843379f51..91d64dd71a8a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
/* vl15 buffers start just after the 4k buffers */
vl15off = dd->physaddr + (dd->piobufbase >> 32) +
dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
+ dd->piovl15base = ioremap(vl15off,
NUM_VL15_BUFS * dd->align4k);
if (!dd->piovl15base) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d4fd8a6cff7b..43c8ee1f46e0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
qib_userlen = dd->ureg_align * dd->cfgctxts;
/* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
if (!qib_kregbase)
goto bail;
@@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
goto bail_kregbase;
if (qib_userlen) {
- qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userbase = ioremap(qib_physaddr + dd->uregbase,
qib_userlen);
if (!qib_userbase)
goto bail_piobase;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 864f2af171f7..3dc6ce033319 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
- dd->kregbase = ioremap_nocache(addr, len);
+ dd->kregbase = ioremap(addr, len);
if (!dd->kregbase)
return -ENOMEM;
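These are mechanical conversions: ioremap_nocache() had become a pure alias for ioremap(), which already returns an uncached mapping on the architectures Linux supports, so the alias was removed tree-wide. The replacement is one-for-one:

	dd->kregbase = ioremap(addr, len);	/* formerly ioremap_nocache() */
	if (!dd->kregbase)
		return -ENOMEM;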
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a26a4fd86bf4..4f6cc0de7ef9 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cq;
}
- cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
+ cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index c61e665ff261..b039f1f00e05 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags);
+ umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index f15809c28f67..9de1281f9a3b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -276,8 +276,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0);
+ qp->rumem =
+ ib_umem_get(pd->device, ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
+ qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 98c8be71d91d..d330decfb80a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
+ srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index b9a76bf74857..72f6534fbb52 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 35a2baf2f364..e83c7b518bfa 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access);
+ umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e5f438ab716c..4a0d3a9e72e1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1182,7 +1182,7 @@ unref:
return NETDEV_TX_OK;
}
-static void ipoib_timeout(struct net_device *dev)
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b273e421e910 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
- isert_info("iscsi_conn %p\n", conn);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
- }
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
- isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f918fca9ada3..cb6e3a5f509c 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
struct evdev_client *client;
int error;
- client = kzalloc(struct_size(client, buffer, bufsize),
- GFP_KERNEL | __GFP_NOWARN);
- if (!client)
- client = vzalloc(struct_size(client, buffer, bufsize));
+ client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
if (!client)
return -ENOMEM;
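kvzalloc() folds the old two-step pattern (try kzalloc with __GFP_NOWARN, fall back to vzalloc) into a single call, and the matching kvfree() releases either kind of allocation. A minimal sketch:

	client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	/* ... later, on the teardown path ... */
	kvfree(client);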
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index f7414091d94e..2fe9dcfe0a6f 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev)
if (!r)
return -ENOMEM;
- r->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ r->mmio_base = ioremap(res->start, resource_size(res));
if (r->mmio_base == NULL) {
dev_err(&pdev->dev, "failed to remap IO memory\n");
err = -ENXIO;
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 27ad73f43451..c155adebf96e 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
pdata = &priv->pdata;
- priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
+ priv->iomem_base = ioremap(res->start, resource_size(res));
if (priv->iomem_base == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
error = -ENXIO;
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 83368f1e7c4e..4650f4a94989 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
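A timeout of 0 tells usb_control_msg() to wait indefinitely for the transfer to complete, which can wedge the probe path on a misbehaving device; USB_CTRL_SET_TIMEOUT (5 seconds) bounds the wait. The fixed call shape, with illustrative error handling:

	retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				 0x11, 0x40, 0x5601, 0x0, NULL, 0,
				 USB_CTRL_SET_TIMEOUT);
	if (retval)
		return retval;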
diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c
index 4d875f2ac13d..ee55f22dbca5 100644
--- a/drivers/input/misc/max77650-onkey.c
+++ b/drivers/input/misc/max77650-onkey.c
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
return input_register_device(onkey->input);
}
+static const struct of_device_id max77650_onkey_of_match[] = {
+ { .compatible = "maxim,max77650-onkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
static struct platform_driver max77650_onkey_driver = {
.driver = {
.name = "max77650-onkey",
+ .of_match_table = max77650_onkey_of_match,
},
.probe = max77650_onkey_probe,
};
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ecd762f93732..53ad25eaf1a2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
- on ? regs->enable_mask : 0, val);
+ regs->enable_mask, on ? ~0 : 0);
return rc;
}
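regmap_update_bits(map, reg, mask, val) modifies only the bits set in mask, taking their new values from val. The old call put the on/off decision into the mask, so turning the vibrator off updated no bits at all; the fix keeps enable_mask as the mask and moves the decision into val:

	rc = regmap_update_bits(vib->regmap, regs->enable_addr,
				regs->enable_mask, on ? ~0 : 0);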
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 41acde60b60f..3332b77eef2a 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
goto failed;
}
- trkball->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ trkball->mmio_base = ioremap(res->start, resource_size(res));
if (!trkball->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap registers\n");
error = -ENXIO;
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 0bc01cfc2b51..6b23e679606e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -24,6 +24,12 @@
#define F54_NUM_TX_OFFSET 1
#define F54_NUM_RX_OFFSET 0
+/*
+ * The SMBus protocol can read at most 32 bytes at a time, so cap each
+ * block at that size; the same chunking works fine over I2C/SPI too.
+ */
+#define F54_REPORT_DATA_SIZE 32
+
/* F54 commands */
#define F54_GET_REPORT 1
#define F54_FORCE_CAL 2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
int report_size;
u8 command;
int error;
+ int i;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- fifo[0] = 0;
- fifo[1] = 0;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+ int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+ fifo[0] = i & 0xff;
+ fifo[1] = i >> 8;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, f54->report_data,
- report_size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report_size, error);
- goto abort;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET,
+ f54->report_data + i, size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, size, error);
+ goto abort;
+ }
}
abort:
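Reading the report in F54_REPORT_DATA_SIZE chunks keeps each transfer within the 32-byte SMBus limit; before every chunk the FIFO read offset is rewritten as a 16-bit little-endian value. The offset encoding, in isolation:

	fifo[0] = i & 0xff;	/* offset, low byte */
	fifo[1] = i >> 8;	/* offset, high byte */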
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index b313c579914f..2407ea43de59 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
retval = 0;
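The loops transfer SMB_MAX_COUNT bytes per iteration, but previously only the data pointer advanced, so every block after the first hit the same RMI register again. The fix advances all three cursors together:

	cur_len -= SMB_MAX_COUNT;
	databuff += SMB_MAX_COUNT;
	rmiaddr += SMB_MAX_COUNT;	/* previously missing */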
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 96f9b5397367..2f9775de3c5b 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
- ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
+ ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 2ca586fb914f..e08b0ef078e8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
aiptek->inputdev = inputdev;
aiptek->intf = intf;
- aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+ aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
aiptek->inDelay = 0;
aiptek->endDelay = 0;
aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
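intf->altsetting[0] is merely the first entry in the alternate-setting array and need not be the setting the host actually selected; intf->cur_altsetting always is. The sanity checks and endpoint lookups in this and the following USB input drivers therefore switch to it, e.g.:

	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
		return -ENODEV;	/* illustrative; aiptek returns -EINVAL via fail3 */
	endpoint = &intf->cur_altsetting->endpoint[0].desc;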
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 35031228a6d0..96d65575f75a 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
if (usb_endpoint_xfer_int(endpoint))
dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
- dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+ dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+ usbinterface->cur_altsetting->extralen);
/*
* Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
usb_fill_int_urb(gtco->urbinfo,
udev,
usb_rcvintpipe(udev,
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index a1f3a0cb197e..38f087404f7a 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 0af0fe8c40d7..742a7e96c1b5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 1dd47dda71cd..34d31c7ec8ba 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
int error;
/* Check if we really have the right interface. */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 568c52317757..823cc4ef51fd 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -440,7 +440,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
return NULL;
}
- return (u8 __iomem *)ioremap_nocache(address, end);
+ return (u8 __iomem *)ioremap(address, end);
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
+ /* save the value to restore, if writable */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ goto pc_false;
+
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2)) {
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
- }
+ (val != val2))
+ goto pc_false;
+
+ /* restore */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+
+pc_false:
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+ return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
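The probe test writes a scratch value to performance counter 0 and reads it back, so it now snapshots the register first and restores the snapshot afterwards instead of leaving the scratch value behind. The shape of the test, with read_reg()/write_reg() as illustrative stand-ins for iommu_pc_get_set_reg():

	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (read_reg(&save_reg) ||			/* snapshot */
	    write_reg(&val) || read_reg(&val2) || val != val2 ||
	    write_reg(&save_reg))			/* restore */
		goto pc_false;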
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1801f0aaf013..932267f49f9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- if (info)
+ if (info && info != DEFER_DEVICE_DOMAIN_INFO
+ && info != DUMMY_DEVICE_DOMAIN_INFO)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index d246d74ec3a5..23445ebfda5c 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
/* Map internal tpci200 driver user space */
tpci200->info->interface_regs =
- ioremap_nocache(pci_resource_start(tpci200->info->pdev,
+ ioremap(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
if (!tpci200->info->interface_regs) {
@@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
ret = -EBUSY;
goto out_err_pci_request;
}
- tpci200->info->cfg_regs = ioremap_nocache(
+ tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
pci_resource_len(pdev, TPCI200_CFG_MEM_BAR));
if (!tpci200->info->cfg_regs) {
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 9c2a4b5d30cf..d480a514c983 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->board_id = ipoctal->dev->id_device;
region = &ipoctal->dev->region[IPACK_IO_SPACE];
- addr = devm_ioremap_nocache(&ipoctal->dev->dev,
+ addr = devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!addr) {
dev_err(&ipoctal->dev->dev,
@@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_INT_SPACE];
ipoctal->int_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!ipoctal->int_space) {
dev_err(&ipoctal->dev->dev,
@@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_MEM8_SPACE];
ipoctal->mem8_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, 0x8000);
if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 697e6a8ccaae..1006c694d9fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -457,6 +457,12 @@ config IMX_IRQSTEER
help
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+config IMX_INTMUX
+ def_bool y if ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX INTMUX interrupt multiplexer.
+
config LS1X_IRQ
bool "Loongson-1 Interrupt Controller"
depends on MACH_LOONGSON32
@@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
help
This enables support for the PLIC chip found in SiFive (and
potentially other) RISC-V systems. The PLIC controls devices
@@ -499,4 +506,11 @@ config SIFIVE_PLIC
If you don't know what to do here, say Y.
+config EXYNOS_IRQ_COMBINER
+ bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Samsung Exynos chips.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e806dda690ea..eae0d78cbf22 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
-obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
@@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
-obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
+obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
new file mode 100644
index 000000000000..c90a3346b985
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Copyright 2019 IBM Corporation
+ *
+ * Eddie James <eajames@linux.ibm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#define ASPEED_SCU_IC_REG 0x018
+#define ASPEED_SCU_IC_SHIFT 0
+#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_NUM_IRQS 7
+#define ASPEED_SCU_IC_STATUS_SHIFT 16
+
+#define ASPEED_AST2600_SCU_IC0_REG 0x560
+#define ASPEED_AST2600_SCU_IC0_SHIFT 0
+#define ASPEED_AST2600_SCU_IC0_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
+#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+
+#define ASPEED_AST2600_SCU_IC1_REG 0x570
+#define ASPEED_AST2600_SCU_IC1_SHIFT 4
+#define ASPEED_AST2600_SCU_IC1_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+
+struct aspeed_scu_ic {
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned int reg;
+ struct regmap *scu;
+ struct irq_domain *irq_domain;
+};
+
+static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq;
+ unsigned int sts;
+ unsigned long bit;
+ unsigned long enabled;
+ unsigned long max;
+ unsigned long status;
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * The SCU IC has just one register to control its operation and read
+ * status. The interrupt enable bits occupy the lower 16 bits of the
+ * register, while the interrupt status bits occupy the upper 16 bits.
+ * The status bit for a given interrupt is always 16 bits shifted from
+ * the enable bit for the same interrupt.
+ * Therefore, perform the IRQ operations in the enable bit space by
+ * shifting the status down to get the mapping and then back up to
+ * clear the bit.
+ */
+ regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ enabled = sts & scu_ic->irq_enable;
+ status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ irq = irq_find_mapping(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
+ generic_handle_irq(irq);
+
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. To keep the mask operation
+ * from acking pending interrupts, the status bits must be covered by
+ * the update mask but written as 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+}
+
+static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. To keep the unmask operation
+ * from acking pending interrupts, the status bits must be covered by
+ * the update mask but written as 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+}
+
+static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip aspeed_scu_ic_chip = {
+ .name = "aspeed-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask,
+ .irq_unmask = aspeed_scu_ic_irq_unmask,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
+ .map = aspeed_scu_ic_map,
+};
+
+static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+ struct device_node *node)
+{
+ int irq;
+ int rc = 0;
+
+ if (!node->parent) {
+ rc = -ENODEV;
+ goto err;
+ }
+
+ scu_ic->scu = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(scu_ic->scu)) {
+ rc = PTR_ERR(scu_ic->scu);
+ goto err;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ rc = irq;
+ goto err;
+ }
+
+ scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ &aspeed_scu_ic_domain_ops,
+ scu_ic);
+ if (!scu_ic->irq_domain) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ scu_ic);
+
+ return 0;
+
+err:
+ kfree(scu_ic);
+
+ return rc;
+}
+
+static int __init aspeed_scu_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
+ scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
+ scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
+ scu_ic->reg = ASPEED_SCU_IC_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+ aspeed_ast2600_scu_ic0_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+ aspeed_ast2600_scu_ic1_of_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e05673bcd52b..f71758632f8d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -106,6 +106,7 @@ struct its_node {
u64 typer;
u64 cbaser_save;
u32 ctlr_save;
+ u32 mpidr;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
@@ -116,12 +117,22 @@ struct its_node {
};
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
#define ITS_ITT_ALIGN SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
-#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID_BITS \
+ ({ \
+ int nvpeid = 16; \
+ if (gic_rdists->has_rvpeid && \
+ gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
+ nvpeid = 1 + (gic_rdists->gicd_typer2 & \
+ GICD_TYPER2_VID); \
+ \
+ nvpeid; \
+ })
#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
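
ITS_MAX_VPEID_BITS is now a GNU C statement expression so the VPEID width can be read out of GICD_TYPER2 at run time when RVPEID is present. A stand-alone sketch of the same construct (requires GCC/Clang extensions; the field layouts here are made-up stand-ins for GICD_TYPER2_VIL/VID):

#include <stdio.h>

/* hypothetical stand-ins for the GICD_TYPER2 fields */
#define TYPER2_VIL (1u << 7)
#define TYPER2_VID 0x1fu

static int has_rvpeid = 1;
static unsigned int gicd_typer2 = TYPER2_VIL | 18;	/* VPEID width - 1 */

#define MAX_VPEID_BITS \
	({ \
		int nvpeid = 16; \
		if (has_rvpeid && (gicd_typer2 & TYPER2_VIL)) \
			nvpeid = 1 + (gicd_typer2 & TYPER2_VID); \
		nvpeid; \
	})

int main(void)
{
	printf("VPEID bits: %d, max vPEs: %d\n",
	       MAX_VPEID_BITS, 1 << MAX_VPEID_BITS);
	return 0;
}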
/* Convert page order to size in bytes */
@@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
return &its_dev->event_map.vlpi_maps[event];
}
-static struct its_collection *irq_to_col(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ return dev_event_to_vlpi_map(its_dev, event);
+ }
+
+ return NULL;
+}
+
+static int irq_to_cpuid(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_vlpi_map *map = get_vlpi_map(d);
- return dev_event_to_col(its_dev, its_get_event_id(d));
+ if (map)
+ return map->vpe->col_idx;
+
+ return its_dev->event_map.col_map[its_get_event_id(d)];
}
static struct its_collection *valid_col(struct its_collection *col)
@@ -322,6 +349,10 @@ struct its_cmd_desc {
u16 seq_num;
u16 its_list;
} its_vmovp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_invdb_cmd;
};
};
@@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+ its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+ its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_db(struct its_cmd_block *cmd, bool db)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
+}
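
All of these encoders funnel into its_mask_encode(), which drops a value into bits [msb:lsb] of one 64-bit command word. A sketch of such a helper, assuming the usual clear-then-insert semantics:

#include <stdio.h>
#include <stdint.h>

/* build a mask covering bits [h:l], e.g. mask64(51, 16) */
static uint64_t mask64(unsigned int h, unsigned int l)
{
	return (~0ULL >> (63 - h + l)) << l;
}

/* clear bits [h:l] of *raw and insert val there */
static void mask_encode(uint64_t *raw, uint64_t val,
			unsigned int h, unsigned int l)
{
	uint64_t m = mask64(h, l);

	*raw &= ~m;
	*raw |= (val << l) & m;
}

int main(void)
{
	uint64_t cmd = 0;

	/* mirrors its_encode_vconf_addr(): bits [51:16] of word 0 */
	mask_encode(&cmd, 0x12345000ULL >> 16, 51, 16);
	printf("raw_cmd[0] = 0x%016llx\n", (unsigned long long)cmd);
	return 0;
}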
+
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
- unsigned long vpt_addr;
+ unsigned long vpt_addr, vconf_addr;
u64 target;
-
- vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
- target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+ bool alloc;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+ if (!desc->its_vmapp_cmd.valid) {
+ if (is_v4_1(its)) {
+ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ its_encode_alloc(cmd, alloc);
+ }
+
+ goto out;
+ }
+
+ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+ if (!is_v4_1(its))
+ goto out;
+
+ vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+ its_encode_alloc(cmd, alloc);
+
+ /* We can only signal PTZ when alloc==1. Why do we have two bits? */
+ its_encode_ptz(cmd, alloc);
+ its_encode_vconf_addr(cmd, vconf_addr);
+ its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmapp_cmd.vpe);
@@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmapti_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmovi_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
its_encode_target(cmd, target);
+ if (is_v4_1(its)) {
+ its_encode_db(cmd, true);
+ its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
+ }
+
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
@@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
return valid_vpe(its, map->vpe);
}
+static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_INVDB);
+ its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_invdb_cmd.vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id)
its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}
-/*
- * irqchip functions - assumes MSI, mostly.
- */
-static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
-
- if (!irqd_is_forwarded_to_vcpu(d))
- return NULL;
+ struct its_cmd_desc desc;
- return dev_event_to_vlpi_map(its_dev, event);
+ desc.its_invdb_cmd.vpe = vpe;
+ its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_vlpi_map *map = get_vlpi_map(d);
@@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase)
static void direct_lpi_inv(struct irq_data *d)
{
- struct its_collection *col;
+ struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
+ u64 val;
+
+ if (map) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ WARN_ON(!is_v4_1(its_dev->its));
+
+ val = GICR_INVLPIR_V;
+ val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
+ val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
+ } else {
+ val = d->hwirq;
+ }
/* Target the redistributor this LPI is currently routed to */
- col = irq_to_col(d);
- rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
- gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
}
@@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ if (gic_rdists->has_direct_lpi &&
+ (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
direct_lpi_inv(d);
else if (!irqd_is_forwarded_to_vcpu(d))
its_send_inv(its_dev, its_get_event_id(d));
@@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
+ /*
+ * GICv4.1 does away with the per-LPI nonsense, nothing to do
+ * here.
+ */
+ if (is_v4_1(its_dev->its))
+ return;
+
map = dev_event_to_vlpi_map(its_dev, event);
if (map->db_enabled == enable)
@@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its,
return indirect;
}
+static u32 compute_common_aff(u64 val)
+{
+ u32 aff, clpiaff;
+
+ aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+ clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+ return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
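
Each increment of CommonLPIAff discards one more 8-bit affinity level from the comparison: 0 means all redistributors share LPIs, 1 keeps Aff3, 2 keeps Aff3.Aff2, and so on. A worked sketch with hypothetical affinity values:

#include <stdio.h>
#include <stdint.h>

/* keep only the affinity levels above the CommonLPIAff boundary */
static uint32_t common_aff(uint32_t aff, unsigned int clpiaff)
{
	return aff & ~(0xffffffffu >> (clpiaff * 8));
}

int main(void)
{
	uint32_t a = 0x01020304;	/* Aff3.Aff2.Aff1.Aff0, hypothetical */
	uint32_t b = 0x01020599;	/* same Aff3.Aff2, different Aff1.Aff0 */

	/* clpiaff == 2: LPIs are common across RDs sharing Aff3.Aff2 */
	printf("a -> 0x%08x, b -> 0x%08x, shared: %s\n",
	       common_aff(a, 2), common_aff(b, 2),
	       common_aff(a, 2) == common_aff(b, 2) ? "yes" : "no");
	return 0;
}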
+
+static u32 compute_its_aff(struct its_node *its)
+{
+ u64 val;
+ u32 svpet;
+
+ /*
+ * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+ * the resulting affinity. We then use that to see if it matches
+ * our own affinity.
+ */
+ svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+ val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+ val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+ return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+ struct its_node *its;
+ u32 aff;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+ return NULL;
+
+ aff = compute_its_aff(cur_its);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser;
+
+ if (!is_v4_1(its) || its == cur_its)
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ return its;
+ }
+
+ return NULL;
+}
+
static void its_free_tables(struct its_node *its)
{
int i;
@@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its)
break;
case GITS_BASER_TYPE_VCPU:
+ if (is_v4_1(its)) {
+ struct its_node *sibling;
+
+ WARN_ON(i != 2);
+ if ((sibling = find_sibling_its(its))) {
+ *baser = sibling->tables[2];
+ its_write_baser(its, baser, baser->val);
+ continue;
+ }
+ }
+
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
ITS_MAX_VPEID_BITS);
@@ -2153,6 +2349,220 @@ static int its_alloc_tables(struct its_node *its)
return 0;
}
+static u64 inherit_vpe_l1_table_from_its(void)
+{
+ struct its_node *its;
+ u64 val;
+ u32 aff;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser, addr;
+
+ if (!is_v4_1(its))
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ /* We have a winner! */
+ val = GICR_VPROPBASER_4_1_VALID;
+ if (baser & GITS_BASER_INDIRECT)
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
+ FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
+ switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
+ case GIC_PAGE_SIZE_64K:
+ addr = GITS_BASER_ADDR_48_to_52(baser);
+ break;
+ default:
+ addr = baser & GENMASK_ULL(47, 12);
+ break;
+ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+ return val;
+ }
+
+ return 0;
+}
+
+static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
+{
+ u32 aff;
+ u64 val;
+ int cpu;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ for_each_possible_cpu(cpu) {
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+ u32 tmp;
+
+ if (!base || cpu == smp_processor_id())
+ continue;
+
+ val = gic_read_typer(base + GICR_TYPER);
+ tmp = compute_common_aff(val);
+ if (tmp != aff)
+ continue;
+
+ /*
+ * At this point, we have a victim. This particular CPU
+ * has already booted, and has an affinity that matches
+ * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
+ * Make sure we don't write the Z bit in that case.
+ */
+ val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_Z;
+
+ *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+
+ return val;
+ }
+
+ return 0;
+}
+
+static int allocate_vpe_l1_table(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val, gpsz, npg, pa;
+ unsigned int psz = SZ_64K;
+ unsigned int np, epp, esz;
+ struct page *page;
+
+ if (!gic_rdists->has_rvpeid)
+ return 0;
+
+ /*
+ * If VPENDBASER.Valid is set, disable any previously programmed
+ * VPE by setting PendingLast while clearing Valid. This has the
+ * effect of making sure no doorbell will be generated and we can
+ * then safely clear VPROPBASER.Valid.
+ */
+ if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+ gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ vlpi_base + GICR_VPENDBASER);
+
+ /*
+ * If we can inherit the configuration from another RD, let's do
+ * so. Otherwise, we have to go through the allocation process. We
+ * assume that all RDs have the exact same requirements, as
+ * nothing will work otherwise.
+ */
+ val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!gic_data_rdist()->vpe_table_mask)
+ return -ENOMEM;
+
+ val = inherit_vpe_l1_table_from_its();
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ /* First probe the page size */
+ val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
+
+ switch (gpsz) {
+ default:
+ gpsz = GIC_PAGE_SIZE_4K;
+ /* fall through */
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /*
+ * Start populating the register from scratch, including RO fields
+ * (which we want to print in debug cases...)
+ */
+ val = 0;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
+
+ /* How many entries per GIC page? */
+ esz++;
+ epp = psz / (esz * SZ_8);
+
+ /*
+ * If we need more than just a single L1 page, flag the table
+ * as indirect and compute the number of required L1 pages.
+ */
+ if (epp < ITS_MAX_VPEID) {
+ int nl2;
+
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+
+ /* Number of L2 pages required to cover the VPEID space */
+ nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
+
+ /* Number of L1 pages to point to the L2 pages */
+ npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
+ } else {
+ npg = 1;
+ }
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
+
+ /* Right, that's the number of CPU pages we need for L1 */
+ np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
+
+ pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
+ np, npg, psz, epp, esz);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ gic_data_rdist()->vpe_l1_page = page;
+ pa = virt_to_phys(page_address(page));
+ WARN_ON(!IS_ALIGNED(pa, psz));
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ val |= GICR_VPROPBASER_4_1_Z;
+ val |= GICR_VPROPBASER_4_1_VALID;
+
+out:
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
+
+ pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
+ smp_processor_id(), val,
+ cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
+
+ return 0;
+}
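
The sizing logic above reduces to three divisions: entries per GIC page (epp), L2 pages needed to cover the VPEID space (nl2), and L1 pages needed to point at them (npg). A sketch of that arithmetic with example numbers, 64kB GIC pages and 16-byte vPE entries:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int psz = 65536;	/* 64kB GIC page */
	unsigned int esz = 1 + 1;	/* register field + 1, in 8-byte units */
	unsigned int max_vpeid = 1 << 16;
	unsigned int epp = psz / (esz * 8);	/* entries per GIC page */
	unsigned int npg;

	if (epp < max_vpeid) {
		/* indirect: L1 entries (8 bytes each) point at L2 pages */
		unsigned int nl2 = DIV_ROUND_UP(max_vpeid, epp);

		npg = DIV_ROUND_UP(nl2 * 8, psz);
		printf("indirect: %u L2 pages, %u L1 page(s)\n", nl2, npg);
	} else {
		npg = 1;
		printf("flat: 1 L1 page\n");
	}
	return 0;
}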
+
static int its_alloc_collections(struct its_node *its)
{
int i;
@@ -2244,7 +2654,7 @@ static int __init allocate_lpi_tables(void)
return 0;
}
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
{
u32 count = 1000000; /* 1s! */
bool clean;
@@ -2252,6 +2662,8 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
+ val &= ~clr;
+ val |= set;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
@@ -2264,6 +2676,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
}
} while (!clean && count);
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+ val |= GICR_VPENDBASER_PendingLast;
+ }
+
return val;
}
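
The new clr/set parameters let callers adjust PendingLast and the doorbell request in the same write that drops Valid. A sketch of just the bit manipulation; the bit positions are hypothetical stand-ins for the GICR_VPENDBASER layout:

#include <stdio.h>
#include <stdint.h>

#define VALID		(1ULL << 63)
#define DB		(1ULL << 62)
#define PENDING_LAST	(1ULL << 61)

static uint64_t clear_valid(uint64_t val, uint64_t clr, uint64_t set)
{
	val &= ~VALID;
	val &= ~clr;
	val |= set;
	return val;
}

int main(void)
{
	uint64_t v = VALID | PENDING_LAST;

	/* vPE about to block: PendingLast cleared, doorbell requested */
	printf("blocking: 0x%016llx\n",
	       (unsigned long long)clear_valid(v, PENDING_LAST, DB));
	/* vPE merely migrating: PendingLast forced on, no doorbell */
	printf("moving:   0x%016llx\n",
	       (unsigned long long)clear_valid(v, 0, PENDING_LAST));
	return 0;
}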
@@ -2352,7 +2769,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
- if (gic_rdists->has_vlpis) {
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
/*
@@ -2372,10 +2789,20 @@ static void its_cpu_init_lpis(void)
* ancient programming gets left in and has possibility of
* corrupting memory.
*/
- val = its_clear_vpend_valid(vlpi_base);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
WARN_ON(val & GICR_VPENDBASER_Dirty);
}
+ if (allocate_vpe_l1_table()) {
+ /*
+ * If the allocation has failed, we're in massive trouble.
+ * Disable direct injection, and pray that no VM was
+ * already running...
+ */
+ gic_rdists->has_rvpeid = false;
+ gic_rdists->has_vlpis = false;
+ }
+
/* Make sure the GIC has seen the above */
dsb(sy);
out:
@@ -2859,7 +3286,7 @@ static const struct irq_domain_ops its_domain_ops = {
/*
* This is insane.
*
- * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
* likely), the only way to perform an invalidate is to use a fake
* device to issue an INV command, implying that the LPI has first
* been mapped to some event on that device. Since this is not exactly
@@ -2867,9 +3294,20 @@ static const struct irq_domain_ops its_domain_ops = {
* only issue an UNMAP if we're short on available slots.
*
* Broken by design(tm).
+ *
+ * GICv4.1, on the other hand, mandates that we're able to invalidate
+ * by writing to a MMIO register. It doesn't implement the whole of
+ * DirectLPI, but that's good enough. And most of the time, we don't
+ * even have to invalidate anything, as the redistributor can be told
+ * whether to generate a doorbell or not (we thus leave it enabled,
+ * always).
*/
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already unmapped? */
if (vpe->vpe_proxy_event == -1)
return;
@@ -2892,6 +3330,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (!gic_rdists->has_direct_lpi) {
unsigned long flags;
@@ -2903,6 +3345,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already mapped? */
if (vpe->vpe_proxy_event != -1)
return;
@@ -2925,6 +3371,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
unsigned long flags;
struct its_collection *target_col;
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
@@ -2951,7 +3401,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int cpu = cpumask_first(mask_val);
+ int from, cpu = cpumask_first(mask_val);
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -2959,14 +3409,24 @@ static int its_vpe_set_affinity(struct irq_data *d,
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
*/
- if (vpe->col_idx != cpu) {
- int from = vpe->col_idx;
+ if (vpe->col_idx == cpu)
+ goto out;
- vpe->col_idx = cpu;
- its_send_vmovp(vpe);
- its_vpe_db_proxy_move(vpe, from, cpu);
- }
+ from = vpe->col_idx;
+ vpe->col_idx = cpu;
+ /*
+ * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+ * is sharing its VPE table with the current one.
+ */
+ if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+ cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ goto out;
+
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+
+out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
@@ -3009,16 +3469,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;
- val = its_clear_vpend_valid(vlpi_base);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
- pr_err_ratelimited("ITS virtual pending table not cleaning\n");
- vpe->idai = false;
- vpe->pending_last = true;
- } else {
- vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
- vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
- }
+ vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}
static void its_vpe_invall(struct its_vpe *vpe)
@@ -3151,6 +3605,139 @@ static struct irq_chip its_vpe_irq_chip = {
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
+static struct its_node *find_4_1_its(void)
+{
+ static struct its_node *its = NULL;
+
+ if (!its) {
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (is_v4_1(its))
+ return its;
+ }
+
+ /* Oops? */
+ its = NULL;
+ }
+
+ return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * GICv4.1 wants doorbells to be invalidated using the
+ * INVDB command in order to be broadcast to all RDs. Send
+ * it to the first valid ITS, and let the HW do its magic.
+ */
+ its = find_4_1_its();
+ if (its)
+ its_send_invdb(its, vpe);
+}
+
+static void its_vpe_4_1_mask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_unmask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val = 0;
+
+ /* Schedule the VPE */
+ val |= GICR_VPENDBASER_Valid;
+ val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
+ val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
+ val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (info->req_db) {
+ /*
+ * vPE is going to block: make the vPE non-resident with
+ * PendingLast clear and DB set. The GIC guarantees that if
+ * we read back PendingLast as clear, then a doorbell will be
+ * delivered when an interrupt comes.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ GICR_VPENDBASER_PendingLast,
+ GICR_VPENDBASER_4_1_DB);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ } else {
+ /*
+ * We're not blocking, so just make the vPE non-resident
+ * with PendingLast set, indicating that we'll be back.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ 0,
+ GICR_VPENDBASER_PendingLast);
+ vpe->pending_last = true;
+ }
+}
+
+static void its_vpe_4_1_invall(struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ u64 val;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ /* Target the redistributor this vPE is currently known on */
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+}
+
+static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_4_1_schedule(vpe, info);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_4_1_deschedule(vpe, info);
+ return 0;
+
+ case INVALL_VPE:
+ its_vpe_4_1_invall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_vpe_4_1_irq_chip = {
+ .name = "GICv4.1-vpe",
+ .irq_mask = its_vpe_4_1_mask_irq,
+ .irq_unmask = its_vpe_4_1_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
+};
+
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3186,7 +3773,10 @@ static int its_vpe_init(struct its_vpe *vpe)
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
- vpe->vpe_proxy_event = -1;
+ if (gic_rdists->has_rvpeid)
+ atomic_set(&vpe->vmapp_count, 0);
+ else
+ vpe->vpe_proxy_event = -1;
return 0;
}
@@ -3228,6 +3818,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
+ struct irq_chip *irqchip = &its_vpe_irq_chip;
struct its_vm *vm = args;
unsigned long *bitmap;
struct page *vprop_page;
@@ -3255,6 +3846,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ if (gic_rdists->has_rvpeid)
+ irqchip = &its_vpe_4_1_irq_chip;
+
for (i = 0; i < nr_irqs; i++) {
vm->vpes[i]->vpe_db_lpi = base + i;
err = its_vpe_init(vm->vpes[i]);
@@ -3265,7 +3859,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (err)
break;
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
- &its_vpe_irq_chip, vm->vpes[i]);
+ irqchip, vm->vpes[i]);
set_bit(i, bitmap);
}
@@ -3778,6 +4372,14 @@ static int __init its_probe_one(struct resource *res,
} else {
pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
}
+
+ if (is_v4_1(its)) {
+ u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+ its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+
+ pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
+ &res->start, its->mpidr, svpet);
+ }
}
its->numa_node = numa_node;
@@ -4138,6 +4740,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4 = false;
int err;
+ gic_rdists = rdists;
+
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
@@ -4150,8 +4754,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
- gic_rdists = rdists;
-
err = allocate_lpi_tables();
if (err)
return err;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d6218012097b..286f98222878 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region *region,
void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
- gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+ /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ gic_data.rdists.has_rvpeid);
+
+ /* Detect non-sensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
return 1;
@@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void)
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
- pr_info("%sVLPI support, %sdirect LPI support\n",
+ pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
- !gic_data.rdists.has_direct_lpi ? "no " : "");
+ !gic_data.rdists.has_direct_lpi ? "no " : "",
+ !gic_data.rdists.has_rvpeid ? "no " : "");
}
/* Check whether it's single security state view */
@@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base,
pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
+
+ gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
new file mode 100644
index 000000000000..c27577c81126
--- /dev/null
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 NXP
+
+/* INTMUX Block Diagram
+ *
+ * ________________
+ * interrupt source # 0 +---->| |
+ * | | |
+ * interrupt source # 1 +++-->| |
+ * ... | | | channel # 0 |--------->interrupt out # 0
+ * ... | | | |
+ * ... | | | |
+ * interrupt source # X-1 +++-->|________________|
+ * | | |
+ * | | |
+ * | | | ________________
+ * +---->| |
+ * | | | | |
+ * | +-->| |
+ * | | | | channel # 1 |--------->interrupt out # 1
+ * | | +>| |
+ * | | | | |
+ * | | | |________________|
+ * | | |
+ * | | |
+ * | | | ...
+ * | | | ...
+ * | | |
+ * | | | ________________
+ * +---->| |
+ * | | | |
+ * +-->| |
+ * | | channel # N |--------->interrupt out # N
+ * +>| |
+ * | |
+ * |________________|
+ *
+ *
+ * N: Interrupt Channel Instance Number (N=7)
+ * X: Interrupt Source Number for each channel (X=32)
+ *
+ * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32
+ * interrupt sources and generates 1 interrupt output.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define CHANIER(n) (0x10 + (0x40 * (n)))
+#define CHANIPR(n) (0x20 + (0x40 * (n)))
+
+#define CHAN_MAX_NUM 0x8
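
Each INTMUX channel owns a 0x40-wide register window, so the enable (CHANIER) and pending (CHANIPR) registers are plain strided offsets. A quick sketch that prints the layout the defines encode:

#include <stdio.h>

#define CHANIER(n) (0x10 + (0x40 * (n)))	/* per-channel enable */
#define CHANIPR(n) (0x20 + (0x40 * (n)))	/* per-channel pending */

int main(void)
{
	for (int n = 0; n < 8; n++)
		printf("chan %d: IER @ 0x%03x, IPR @ 0x%03x\n",
		       n, CHANIER(n), CHANIPR(n));
	return 0;
}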
+
+struct intmux_irqchip_data {
+ int chanidx;
+ int irq;
+ struct irq_domain *domain;
+};
+
+struct intmux_data {
+ raw_spinlock_t lock;
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int channum;
+ struct intmux_irqchip_data irqchip_data[];
+};
+
+static void imx_intmux_irq_mask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* disable the interrupt source of this channel */
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_intmux_irq_unmask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* enable the interrupt source of this channel */
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_intmux_irq_chip = {
+ .name = "intmux",
+ .irq_mask = imx_intmux_irq_mask,
+ .irq_unmask = imx_intmux_irq_unmask,
+};
+
+static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+
+ /*
+ * two cells needed in interrupt specifier:
+ * the 1st cell: hw interrupt number
+ * the 2nd cell: channel index
+ */
+ if (WARN_ON(intsize != 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[1] >= data->channum))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return false;
+
+ return irqchip_data->chanidx == fwspec->param[1];
+}
+
+static const struct irq_domain_ops imx_intmux_domain_ops = {
+ .map = imx_intmux_irq_map,
+ .xlate = imx_intmux_irq_xlate,
+ .select = imx_intmux_irq_select,
+};
+
+static void imx_intmux_irq_handler(struct irq_desc *desc)
+{
+ struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long irqstat;
+ int pos, virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ /* read the interrupt source pending status of this channel */
+ irqstat = readl_relaxed(data->regs + CHANIPR(idx));
+
+ for_each_set_bit(pos, &irqstat, 32) {
+ virq = irq_find_mapping(irqchip_data->domain, pos);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_intmux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irq_domain *domain;
+ struct intmux_data *data;
+ int channum;
+ int i, ret;
+
+ channum = platform_irq_count(pdev);
+ if (channum == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (channum > CHAN_MAX_NUM) {
+ dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
+ CHAN_MAX_NUM);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) +
+ channum * sizeof(data->irqchip_data[0]), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk)) {
+ ret = PTR_ERR(data->ipg_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ data->channum = channum;
+ raw_spin_lock_init(&data->lock);
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < channum; i++) {
+ data->irqchip_data[i].chanidx = i;
+
+ data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
+ if (data->irqchip_data[i].irq <= 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto out;
+ }
+
+ domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ &data->irqchip_data[i]);
+ if (!domain) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out;
+ }
+ data->irqchip_data[i].domain = domain;
+
+ /* disable all interrupt sources of this channel first */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ imx_intmux_irq_handler,
+ &data->irqchip_data[i]);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
+}
+
+static int imx_intmux_remove(struct platform_device *pdev)
+{
+ struct intmux_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ /* disable all interrupt sources of this channel */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ NULL, NULL);
+
+ irq_domain_remove(data->irqchip_data[i].domain);
+ }
+
+ clk_disable_unprepare(data->ipg_clk);
+
+ return 0;
+}
+
+static const struct of_device_id imx_intmux_id[] = {
+ { .compatible = "fsl,imx-intmux", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver imx_intmux_driver = {
+ .driver = {
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ },
+ .probe = imx_intmux_probe,
+ .remove = imx_intmux_remove,
+};
+builtin_platform_driver(imx_intmux_driver);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 01d18b39069e..c5589ee0dfb3 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
struct ingenic_intc_data {
void __iomem *base;
@@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
while (pending) {
int bit = __fls(pending);
- irq = irq_find_mapping(domain, bit + (i * 32));
+ irq = irq_linear_revmap(domain, bit + (i * 32));
generic_handle_irq(irq);
pending &= ~BIT(bit);
}
@@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- domain = irq_domain_add_legacy(node, num_chips * 32,
- JZ4740_IRQ_BASE, 0,
+ domain = irq_domain_add_linear(node, num_chips * 32,
&irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3f09f658e8e2..6b566bba263b 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
.name = "Hisilicon MBIGEN-V2",
.of_match_table = mbigen_of_match,
.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = mbigen_device_probe,
};
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 829084b568fa..ccc7f823911b 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -24,50 +24,101 @@
#define REG_PIN_47_SEL 0x08
#define REG_FILTER_SEL 0x0c
+/* used by A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
* bits 24 to 31. Tests on the actual HW show that these bits are
* stuck at 0. Bits 8 to 15 are responsive and have the expected
* effect.
*/
-#define REG_EDGE_POL_EDGE(x) BIT(x)
-#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
-#define REG_BOTH_EDGE(x) BIT(8 + (x))
-#define REG_EDGE_POL_MASK(x) ( \
- REG_EDGE_POL_EDGE(x) | \
- REG_EDGE_POL_LOW(x) | \
- REG_BOTH_EDGE(x))
+#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x))
+#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x))
+#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x))
+#define REG_EDGE_POL_MASK(params, x) ( \
+ REG_EDGE_POL_EDGE(params, x) | \
+ REG_EDGE_POL_LOW(params, x) | \
+ REG_BOTH_EDGE(params, x))
#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+struct meson_gpio_irq_controller;
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl);
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq);
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+
+struct irq_ctl_ops {
+ void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+ void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+};
+
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
bool support_edge_both;
+ unsigned int edge_both_offset;
+ unsigned int edge_single_offset;
+ unsigned int pol_low_offset;
+ unsigned int pin_sel_mask;
+ struct irq_ctl_ops ops;
};
+#define INIT_MESON_COMMON(irqs, init, sel) \
+ .nr_hwirq = irqs, \
+ .ops = { \
+ .gpio_irq_init = init, \
+ .gpio_irq_sel_pin = sel, \
+ },
+
+#define INIT_MESON8_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
+ meson8_gpio_irq_sel_pin) \
+ .edge_single_offset = 0, \
+ .pol_low_offset = 16, \
+ .pin_sel_mask = 0xff, \
+
+#define INIT_MESON_A1_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin) \
+ .support_edge_both = true, \
+ .edge_both_offset = 16, \
+ .edge_single_offset = 8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0x7f, \
+
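The INIT_* macros compose designated initializers so each SoC table entry only spells out what differs from the common data. A rough sketch of how that composition expands, using a trimmed-down illustrative struct rather than the driver's real one:

#include <stdio.h>

struct params {
	unsigned int nr_hwirq;
	unsigned int pol_low_offset;
	unsigned int pin_sel_mask;
};

#define INIT_COMMON(irqs) \
	.nr_hwirq = irqs,

#define INIT_MESON8_DATA(irqs) \
	INIT_COMMON(irqs) \
	.pol_low_offset = 16, \
	.pin_sel_mask = 0xff,

static const struct params meson8 = {
	INIT_MESON8_DATA(134)	/* expands to the three initializers above */
};

int main(void)
{
	printf("nr_hwirq=%u pol_low=%u mask=0x%x\n",
	       meson8.nr_hwirq, meson8.pol_low_offset, meson8.pin_sel_mask);
	return 0;
}
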
static const struct meson_gpio_irq_params meson8_params = {
- .nr_hwirq = 134,
+ INIT_MESON8_COMMON_DATA(134)
};
static const struct meson_gpio_irq_params meson8b_params = {
- .nr_hwirq = 119,
+ INIT_MESON8_COMMON_DATA(119)
};
static const struct meson_gpio_irq_params gxbb_params = {
- .nr_hwirq = 133,
+ INIT_MESON8_COMMON_DATA(133)
};
static const struct meson_gpio_irq_params gxl_params = {
- .nr_hwirq = 110,
+ INIT_MESON8_COMMON_DATA(110)
};
static const struct meson_gpio_irq_params axg_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
};
static const struct meson_gpio_irq_params sm1_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
.support_edge_both = true,
+ .edge_both_offset = 8,
+};
+
+static const struct meson_gpio_irq_params a1_params = {
+ INIT_MESON_A1_COMMON_DATA(62)
};
static const struct of_device_id meson_irq_gpio_matches[] = {
@@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
+ { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ }
};
@@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
writel_relaxed(tmp, ctl->base + reg);
}
-static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq)
+{
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq)
{
- return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ bit_offset = ((channel % 2) == 0) ? 0 : 16;
+ reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
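
On A1 two channels share each select register: even channels use the low 7 bits, odd channels the 7 bits at offset 16, and the register address advances by 4 for every pair of channels. A worked sketch of the offset math:

#include <stdio.h>

#define REG_PIN_A1_SEL 0x04

int main(void)
{
	for (int channel = 0; channel < 4; channel++) {
		int bit_offset = (channel % 2) ? 16 : 0;
		int reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);

		printf("channel %d -> reg 0x%02x, bits [%d:%d]\n",
		       channel, reg_offset, bit_offset + 6, bit_offset);
	}
	return 0;
}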
+
+/* For A1 and later chips there is a switch to enable/disable the irq */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
}
static int
@@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long hwirq,
u32 **channel_hwirq)
{
- unsigned int reg, idx;
+ unsigned int idx;
spin_lock(&ctl->lock);
@@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
* Setup the mux of the channel to route the signal of the pad
* to the appropriate input of the GIC
*/
- reg = meson_gpio_irq_channel_to_reg(idx);
- meson_gpio_irq_update_bits(ctl, reg,
- 0xff << REG_PIN_SEL_SHIFT(idx),
- hwirq << REG_PIN_SEL_SHIFT(idx));
+ ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
/*
* Get the hwirq number assigned to this channel through
@@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
{
u32 val = 0;
unsigned int idx;
+ const struct meson_gpio_irq_params *params;
+ params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
* precedence over the other edge/polarity settings
*/
if (type == IRQ_TYPE_EDGE_BOTH) {
- if (!ctl->params->support_edge_both)
+ if (!params->support_edge_both)
return -EINVAL;
- val |= REG_BOTH_EDGE(idx);
+ val |= REG_BOTH_EDGE(params, idx);
} else {
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_EDGE(idx);
+ val |= REG_EDGE_POL_EDGE(params, idx);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_LOW(idx);
+ val |= REG_EDGE_POL_LOW(params, idx);
}
spin_lock(&ctl->lock);
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
- REG_EDGE_POL_MASK(idx), val);
+ REG_EDGE_POL_MASK(params, idx), val);
spin_unlock(&ctl->lock);
@@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return ret;
}
+ ctl->params->ops.gpio_irq_init(ctl);
+
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f3985469c221..d70507133c1d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node,
__sync();
}
- mips_gic_base = ioremap_nocache(gic_base, gic_len);
+ mips_gic_base = ioremap(gic_base, gic_len);
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index a166d30deea2..f747e2209ea9 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
handle_IRQ(irq, regs);
}
-static int nvic_irq_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq, unsigned int *type)
-{
- if (WARN_ON(fwspec->param_count < 1))
- return -EINVAL;
- *hwirq = fwspec->param[0];
- *type = IRQ_TYPE_NONE;
- return 0;
-}
-
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = arg;
- ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops nvic_irq_domain_ops = {
- .translate = nvic_irq_domain_translate,
+ .translate = irq_domain_translate_onecell,
.alloc = nvic_irq_domain_alloc,
.free = irq_domain_free_irqs_top,
};
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index f82bc60a6793..6e5e3172796b 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
goto err0;
}
- i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+ i->iomem = devm_ioremap(dev, io[k]->start,
resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 0aca5807a119..aa4af886e43a 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -154,15 +154,37 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
- irq_set_chip_data(irq, NULL);
+ irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
irq_set_noprobe(irq);
return 0;
}
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct irq_domain_ops plic_irqdomain_ops = {
- .map = plic_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
+ .translate = irq_domain_translate_onecell,
+ .alloc = plic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct irq_domain *plic_irqdomain;
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index b7e0ae1af8fa..e8922fa03379 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
switch (id) {
case AS_LED_FLASH:
flash->flash_node = child;
+ fwnode_handle_get(child);
break;
case AS_LED_INDICATOR:
flash->indicator_node = child;
+ fwnode_handle_get(child);
break;
default:
dev_warn(&flash->client->dev,
"unknown LED %u encountered, ignoring\n", id);
break;
}
- fwnode_handle_get(child);
}
if (!flash->flash_node) {
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a5c73f3d5f79..2bf74595610f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led led = {};
const char *state = NULL;
+ /*
+ * Acquire the gpiod from DT with a NULL label, which will be
+ * updated after the LED class device is registered; only then
+ * is the final LED name known.
+ */
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
- led.name);
+ NULL);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_handle_put(child);
return ERR_PTR(ret);
}
+ /* Set gpiod label to match the corresponding LED name. */
+ gpiod_set_consumer_name(led_dat->gpiod,
+ led_dat->cdev.dev->kobj.name);
priv->num_leds++;
}
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 0507c6575c08..491268bb34a7 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// TI LM3532 LED driver
// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
#include <linux/i2c.h>
#include <linux/leds.h>
@@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED string defined\n");
+ dev_err(&priv->client->dev, "Too many LED string defined\n");
continue;
}
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index 4c2d0b3c6dad..a0d4b725c917 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -135,9 +135,16 @@ err_node_put:
return rv;
}
+static const struct of_device_id max77650_led_of_match[] = {
+ { .compatible = "maxim,max77650-led" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
static struct platform_driver max77650_led_driver = {
.driver = {
.name = "max77650-led",
+ .of_match_table = max77650_led_of_match,
},
.probe = max77650_led_probe,
};
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index db5af83f0cec..b6447c1721b4 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
{
if (brightness)
set_latch_u5(LO_ULED, 0);
-
else
set_latch_u5(0, LO_ULED);
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 718729c89440..3abcafe46278 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
module_init(pattern_trig_init);
module_exit(pattern_trig_exit);
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
MODULE_DESCRIPTION("LED Pattern trigger");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
index 9534503b69d9..47b67c6bff7a 100644
--- a/drivers/lightnvm/pblk-trace.h
+++ b/drivers/lightnvm/pblk-trace.h
@@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state,
TP_STRUCT__entry(
__string(name, name)
__field(int, line)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state,
TP_STRUCT__entry(
__string(name, name)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9198c1b480d9..adf26a21fcd1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -301,6 +301,7 @@ struct cached_dev {
struct block_device *bdev;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
struct closure sb_write;
@@ -403,6 +404,7 @@ enum alloc_reserve {
struct cache {
struct cache_set *set;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index cffcdc9feefb..4385303836d8 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
* Our temporary buffer is the same size as the btree node's
* buffer, we can just swap buffers instead of doing a big
* memcpy()
+ *
+ * Don't worry even if 'out' is allocated from the mempool, it
+ * can still be swapped here, because state->pool is a page
+ * mempool created by mempool_init_page_pool(), which allocates
+ * pages with alloc_pages() underneath.
*/
out->magic = b->set->data->magic;
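
A minimal sketch (not bcache code; the demo_* names are hypothetical) of why the swap above is safe: a mempool created with mempool_init_page_pool() hands out real pages from alloc_pages(), so a buffer obtained from it can be exchanged with any other page-backed buffer:

#include <linux/mempool.h>

static mempool_t demo_pool;

static int demo_init(void)
{
	/* keep at least one order-0 page in reserve */
	return mempool_init_page_pool(&demo_pool, 1, 0);
}

static void demo_use(void)
{
	struct page *page = mempool_alloc(&demo_pool, GFP_NOIO);

	if (page)
		mempool_free(page, &demo_pool);
}
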
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 14d6c33b0957..fa872df4e770 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
i = 0;
btree_cache_used = c->btree_cache_used;
- list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
if (nr <= 0)
goto out;
- if (++i > 3 &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
}
nr--;
+ i++;
}
- for (; (nr--) && i < btree_cache_used; i++) {
- if (list_empty(&c->btree_cache))
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (nr <= 0 || i >= btree_cache_used)
goto out;
- b = list_first_entry(&c->btree_cache, struct btree, list);
- list_rotate_left(&c->btree_cache);
-
- if (!b->accessed &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
freed++;
- } else
- b->accessed = 0;
+ }
+
+ nr--;
+ i++;
}
out:
mutex_unlock(&c->bucket_lock);
@@ -1069,7 +1067,6 @@ retry:
BUG_ON(!b->written);
b->parent = parent;
- b->accessed = 1;
for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
prefetch(b->keys.set[i].tree);
@@ -1160,7 +1157,6 @@ retry:
goto retry;
}
- b->accessed = 1;
b->parent = parent;
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
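
The rework above drops the per-node accessed bit: because bcache moves a btree node toward the head of the cache list when it is used, walking the list from the tail already visits nodes in approximate LRU order. A sketch of the reverse, removal-safe iteration pattern (demo_* names are hypothetical):

#include <linux/list.h>

struct demo_node {
	struct list_head list;
};

static void demo_scan(struct list_head *head, long nr)
{
	struct demo_node *n, *t;

	/* 't' holds the previous entry, so 'n' may be freed safely */
	list_for_each_entry_safe_reverse(n, t, head, list) {
		if (nr-- <= 0)
			break;
		/* reclaim n here */
	}
}
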
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 76cfd121a486..f4dcca449391 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -121,8 +121,6 @@ struct btree {
/* Key/pointer for this btree node */
BKEY_PADDED(key);
- /* Single bit - set when accessed, cleared by shrinker */
- unsigned long accessed;
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index be2a2a201603..33ddc5269e8d 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -417,10 +417,14 @@ err:
/* Journalling */
+#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
+
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, n;
+ unsigned int i, nr, ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
if (c->journal.btree_flushing)
return;
@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
- n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to read now_fifo_front_p without holding
+ * c->journal.lock here, because we don't need an exactly
+ * accurate value, only to check whether the front pointer
+ * of c->journal.pin has changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+ * If the oldest journal entry is reclaimed and the front
+ * pointer of c->journal.pin changes, there is no need to
+ * scan c->btree_cache any further; just quit the loop and
+ * flush out what we already have.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+ * Quit this loop if all matching btree nodes are
+ * already scanned and recorded in btree_nodes[].
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
continue;
}
+ /*
+ * Only select the btree node which exactly references
+ * the oldest journal entry.
+ *
+ * If the journal entry pointed to by fifo_front_p is
+ * reclaimed in parallel, don't worry:
+ * - the list_for_each_xxx loop will quit when checking
+ * the next now_fifo_front_p.
+ * - if there are matched nodes recorded in btree_nodes[],
+ * they are clean now (this is why and how the oldest
+ * journal entry can be reclaimed). These selected nodes
+ * will be ignored and skipped in the following for-loop.
+ */
+ if (nr_to_fifo_front(btree_current_write(b)->journal,
+ fifo_front_p,
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
- btree_nodes[n++] = b;
- if (n == BTREE_FLUSH_NR)
+ btree_nodes[nr++] = b;
+ /*
+ * To avoid holding c->bucket_lock for too long, scan
+ * for at most BTREE_FLUSH_NR matched btree nodes. If
+ * more btree nodes reference the oldest journal entry,
+ * try to flush them the next time btree_flush_write()
+ * is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
mutex_unlock(&b->write_lock);
}
+out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);
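
The nr_to_fifo_front() macro introduced above computes a ring-buffer distance: for a power-of-two FIFO whose size is mask + 1, the number of slots between the front index and a given entry is a masked subtraction. A standalone sketch (hypothetical demo_* naming):

/* 0 means idx is the oldest (front) entry of the ring */
static inline unsigned int demo_nr_to_front(unsigned int idx,
					    unsigned int front,
					    unsigned int mask)
{
	return (idx - front) & mask;
}
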
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 77e9869345e7..3dea1d5acd5c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -15,7 +15,6 @@
#include "writeback.h"
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq;
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- struct page **res)
+ struct cache_sb_disk **res)
{
const char *err;
- struct cache_sb *s;
- struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+ struct cache_sb_disk *s;
+ struct page *page;
unsigned int i;
- if (!bh)
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(page))
return "IO error";
-
- s = (struct cache_sb *) bh->b_data;
+ s = page_address(page) + offset_in_page(SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
@@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
}
sb->last_mount = (u32)ktime_get_real_seconds();
- err = NULL;
-
- get_page(bh->b_page);
- *res = bh->b_page;
+ *res = s;
+ return NULL;
err:
- put_bh(bh);
+ put_page(page);
return err;
}
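
The hunk above replaces buffer_head I/O with the block device's own page cache; read_cache_page_gfp() returns a referenced page that the caller later drops with put_page(). A condensed sketch of the pattern, with the hypothetical helper demo_read_block():

#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

static void *demo_read_block(struct block_device *bdev, loff_t off)
{
	struct page *page;

	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   off >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(page))
		return NULL;

	/* caller must put_page(virt_to_page(ptr)) when done */
	return page_address(page) + offset_in_page(off);
}
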
@@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
closure_put(&dc->sb_write);
}
-static void __write_super(struct cache_sb *sb, struct bio *bio)
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+ struct bio *bio)
{
- struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned int i;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_iter.bi_size = SB_SIZE;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
- bch_bio_map(bio, NULL);
+ __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+ offset_in_page(out));
out->offset = cpu_to_le64(sb->offset);
out->version = cpu_to_le64(sb->version);
@@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_reset(bio);
+ bio_init(bio, dc->sb_bv, 1);
bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
closure_get(cl);
/* I/O request sent to backing device */
- __write_super(&dc->sb, bio);
+ __write_super(&dc->sb, dc->sb_disk, bio);
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
@@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
- bio_reset(bio);
+ bio_init(bio, ca->sb_bv, 1);
bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
closure_get(cl);
- __write_super(&ca->sb, bio);
+ __write_super(&ca->sb, ca->sb_disk, bio);
}
closure_return_with_destructor(cl, bcache_write_super_unlock);
@@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl)
mutex_unlock(&bch_register_lock);
+ if (dc->sb_disk)
+ put_page(virt_to_page(dc->sb_disk));
+
if (!IS_ERR_OR_NULL(dc->bdev))
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
-static int register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev,
struct cached_dev *dc)
{
@@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
-
- bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
-
+ dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
@@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj)
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&ca->free[i]);
- if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(bio_first_page_all(&ca->sb_bio));
+ if (ca->sb_disk)
+ put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -2259,7 +2256,7 @@ err_free:
return ret;
}
-static int register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev, struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
@@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
-
- bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
+ ca->sb_disk = sb_disk;
if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
@@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = -EINVAL;
- const char *err = "cannot allocate memory";
+ const char *err;
char *path = NULL;
- struct cache_sb *sb = NULL;
- struct block_device *bdev = NULL;
- struct page *sb_page = NULL;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+ ssize_t ret;
+ ret = -EBUSY;
+ err = "failed to reference bcache module";
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ goto out;
/* For latest state of bcache_is_reboot */
smp_mb();
+ err = "bcache is in reboot";
if (bcache_is_reboot)
- return -EBUSY;
+ goto out_module_put;
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
- goto err;
+ goto out_module_put;
sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
if (!sb)
- goto err;
+ goto out_free_path;
+ ret = -EINVAL;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
@@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto quiet_out;
+ goto done;
}
- goto err;
+ goto out_free_sb;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
- goto err_close;
+ goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_page);
+ err = read_super(sb, bdev, &sb_disk);
if (err)
- goto err_close;
+ goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
- goto err_close;
+ goto out_put_sb_page;
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_disk, bdev, dc);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
- goto err;
+ goto out_free_sb;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
- goto err_close;
+ goto out_put_sb_page;
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err;
+ if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ goto out_free_sb;
}
-quiet_out:
- ret = size;
-out:
- if (sb_page)
- put_page(sb_page);
+
+done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
- return ret;
+ return size;
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- pr_info("error %s: %s", path, err);
- goto out;
+out_put_sb_page:
+ put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+ path = NULL;
+out_module_put:
+ module_put(THIS_MODULE);
+out:
+ pr_info("error %s: %s", path?path:"", err);
+ return ret;
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3c50c4e4da8f..963d3774c93e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -17,7 +17,7 @@
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 3ad18246fcb3..e230052c2107 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
for (i = 0; i < bitmap->storage.file_pages; i++) {
- if (!bitmap->storage.filemap)
- return;
dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
@@ -1338,7 +1336,8 @@ void md_bitmap_daemon_work(struct mddev *mddev)
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
break;
- if (test_and_clear_page_attr(bitmap, j,
+ if (bitmap->storage.filemap &&
+ test_and_clear_page_attr(bitmap, j,
BITMAP_PAGE_NEEDWRITE)) {
write_page(bitmap, bitmap->storage.filemap[j], 0);
}
@@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev)
return;
md_bitmap_wait_behind_writes(mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, true);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
@@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev)
goto out;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, true);
+ mddev_create_serial_pool(mddev, rdev, true);
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
if (backlog > COUNTER_MAX)
return -EINVAL;
mddev->bitmap_info.max_write_behind = backlog;
- if (!backlog && mddev->wb_info_pool) {
- /* wb_info_pool is not needed if backlog is zero */
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- } else if (backlog && !mddev->wb_info_pool) {
- /* wb_info_pool is needed since backlog is not zero */
+ if (!backlog && mddev->serial_info_pool) {
+ /* serial_info_pool is not needed if backlog is zero */
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, false);
+ } else if (backlog && !mddev->serial_info_pool) {
+ /* serial_info_pool is needed since backlog is not zero */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e7c9f398bc6..4824d50526fa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
{
- if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+ return;
+
+ kvfree(rdev->serial);
+ rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+ /* serial_nums equals BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
+ struct serial_in_rdev *serial = NULL;
+
+ if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- spin_lock_init(&rdev->wb_list_lock);
- INIT_LIST_HEAD(&rdev->wb_list);
- init_waitqueue_head(&rdev->wb_io_wait);
- set_bit(WBCollisionCheck, &rdev->flags);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
+ if (!serial)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
+ rdev->serial = serial;
+ set_bit(CollisionCheck, &rdev->flags);
+
+ return 0;
+}
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int ret = 0;
+
+ rdev_for_each(rdev, mddev) {
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ break;
+ }
+
+ /* Free all resources if the pool does not exist */
+ if (ret && !mddev->serial_info_pool)
+ rdevs_uninit_serial(mddev);
+
+ return ret;
}
/*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * rdev needs to enable serialization if it meets both conditions:
+ * 1. it is a multi-queue device flagged with writemostly.
+ * 2. write-behind mode is enabled.
*/
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
{
- if (mddev->bitmap_info.max_write_behind == 0)
- return;
+ return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+ rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resources for rdev(s), then create serial_info_pool if:
+ * 1. rdev is the first device that returns true from rdev_need_serial.
+ * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ int ret = 0;
- if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ if (rdev && !rdev_need_serial(rdev) &&
+ !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool == NULL) {
+ if (!is_suspend)
+ mddev_suspend(mddev);
+
+ if (!rdev)
+ ret = rdevs_init_serial(mddev);
+ else
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ goto abort;
+
+ if (mddev->serial_info_pool == NULL) {
unsigned int noio_flag;
- if (!is_suspend)
- mddev_suspend(mddev);
noio_flag = memalloc_noio_save();
- mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
memalloc_noio_restore(noio_flag);
- if (!mddev->wb_info_pool)
- pr_err("can't alloc memory pool for writemostly\n");
- if (!is_suspend)
- mddev_resume(mddev);
+ if (!mddev->serial_info_pool) {
+ rdevs_uninit_serial(mddev);
+ pr_err("can't alloc memory pool for serialization\n");
+ }
}
+
+abort:
+ if (!is_suspend)
+ mddev_resume(mddev);
}
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
/*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
+ * Free resources from rdev(s), and destroy serial_info_pool under these
+ * conditions:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the policy is not enabled.
+ * 3. the policy is being disabled: the pool is destroyed only when no
+ *    rdev needs it.
*/
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
{
- if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+ if (rdev && !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool) {
+ if (mddev->serial_info_pool) {
struct md_rdev *temp;
- int num = 0;
+ int num = 0; /* used to track if other rdevs need the pool */
- /*
- * Check if other rdevs need wb_info_pool.
- */
- rdev_for_each(temp, mddev)
- if (temp != rdev &&
- test_bit(WBCollisionCheck, &temp->flags))
+ if (!is_suspend)
+ mddev_suspend(mddev);
+ rdev_for_each(temp, mddev) {
+ if (!rdev) {
+ if (!mddev->serialize_policy ||
+ !rdev_need_serial(temp))
+ rdev_uninit_serial(temp);
+ else
+ num++;
+ } else if (temp != rdev &&
+ test_bit(CollisionCheck, &temp->flags))
num++;
- if (!num) {
- mddev_suspend(rdev->mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- mddev_resume(rdev->mddev);
}
+
+ if (rdev)
+ rdev_uninit_serial(rdev);
+
+ if (num)
+ pr_info("The mempool could be used by other devices\n");
+ else {
+ mempool_destroy(mddev->serial_info_pool);
+ mddev->serial_info_pool = NULL;
+ }
+ if (!is_suspend)
+ mddev_resume(mddev);
}
}
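
For the bucket count in rdev_init_serial() above: one page of per-bucket state is split into 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t))) buckets, which with 4 KiB pages and a 4-byte atomic_t works out to 1 << (12 - 2) = 1024, matching BARRIER_BUCKETS_NR in raid1-10.c. A sketch of the computation (hypothetical helper name):

#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/atomic.h>

static int demo_serial_nums(void)
{
	/* e.g. 1 << (12 - 2) == 1024 buckets on 4 KiB pages */
	return 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t)));
}
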
@@ -2337,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
pr_debug("md: bind<%s>\n", b);
if (mddev->raid_disks)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2375,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2888,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
- mddev_create_wb_pool(rdev->mddev, rdev, false);
+ mddev_create_serial_pool(rdev->mddev, rdev, false);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
@@ -5277,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev =
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
fail_last_dev_store);
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+ if (mddev->pers == NULL || (mddev->pers->level != 1))
+ return sprintf(page, "n/a\n");
+ else
+ return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Set serialize_policy to true to enforce that raid1 write IO is
+ * not reordered.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ if (value == mddev->serialize_policy)
+ return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ pr_err("md: serialize_policy is only effective for raid1\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ mddev_suspend(mddev);
+ if (value)
+ mddev_create_serial_pool(mddev, NULL, true);
+ else
+ mddev_destroy_serial_pool(mddev, NULL, true);
+ mddev->serialize_policy = value;
+ mddev_resume(mddev);
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+ serialize_policy_store);
+
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
@@ -5294,6 +5436,7 @@ static struct attribute *md_default_attrs[] = {
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
+ &md_serialize_policy.attr,
NULL,
};
@@ -5769,18 +5912,18 @@ int md_run(struct mddev *mddev)
goto bitmap_abort;
if (mddev->bitmap_info.max_write_behind > 0) {
- bool creat_pool = false;
+ bool create_pool = false;
rdev_for_each(rdev, mddev) {
if (test_bit(WriteMostly, &rdev->flags) &&
- rdev_init_wb(rdev))
- creat_pool = true;
- }
- if (creat_pool && mddev->wb_info_pool == NULL) {
- mddev->wb_info_pool =
- mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- if (!mddev->wb_info_pool) {
+ rdev_init_serial(rdev))
+ create_pool = true;
+ }
+ if (create_pool && mddev->serial_info_pool == NULL) {
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
err = -ENOMEM;
goto bitmap_abort;
}
@@ -6025,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ /* disable the policy to guarantee that rdevs free their serialization resources */
+ mddev->serialize_policy = 0;
+ mddev_destroy_serial_pool(mddev, NULL, true);
}
void md_stop_writes(struct mddev *mddev)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5f86f8adb0a4..acd681939112 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -32,6 +32,16 @@
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+
+/*
+ * The struct embedded in rdev is used to serialize IO.
+ */
+struct serial_in_rdev {
+ struct rb_root_cached serial_rb;
+ spinlock_t serial_lock;
+ wait_queue_head_t serial_io_wait;
+};
+
/*
* MD's 'extended' device
*/
@@ -110,12 +120,7 @@ struct md_rdev {
* in superblock.
*/
- /*
- * The members for check collision of write behind IOs.
- */
- struct list_head wb_list;
- spinlock_t wb_list_lock;
- wait_queue_head_t wb_io_wait;
+ struct serial_in_rdev *serial; /* used for raid1 io serialization */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -201,9 +206,9 @@ enum flag_bits {
* it didn't fail, so don't use FailFast
* any more for metadata
*/
- WBCollisionCheck, /*
- * multiqueue device should check if there
- * is collision between write behind bios.
+ CollisionCheck, /*
+ * check if there is a collision between
+ * raid1 serialized bios.
*/
};
@@ -263,12 +268,13 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_WB_INFOS 8
-/* record current range of write behind IOs */
-struct wb_info {
- sector_t lo;
- sector_t hi;
- struct list_head list;
+#define NR_SERIAL_INFOS 8
+/* record the current range of serialized IOs */
+struct serial_info {
+ struct rb_node node;
+ sector_t start; /* start sector of rb node */
+ sector_t last; /* end sector of rb node */
+ sector_t _subtree_last; /* highest sector in subtree of rb node */
};
struct mddev {
@@ -487,13 +493,14 @@ struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
- mempool_t *wb_info_pool;
+ mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
bool has_superblocks:1;
bool fail_last_dev:1;
+ bool serialize_policy:1;
};
enum recovery_flags {
@@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
-extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend);
+extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
+extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b7c20979bd19..322386ff5d22 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 201fd8aec59a..cd810e195086 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
#include <trace/events/block.h>
@@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#include "raid1-10.c"
-static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+ START, LAST, static inline, raid1_rb);
+
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+ struct serial_info *si, int idx)
{
- struct wb_info *wi, *temp_wi;
unsigned long flags;
int ret = 0;
- struct mddev *mddev = rdev->mddev;
-
- wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(temp_wi, &rdev->wb_list, list) {
- /* collision happened */
- if (hi > temp_wi->lo && lo < temp_wi->hi) {
- ret = -EBUSY;
- break;
- }
+ sector_t lo = r1_bio->sector;
+ sector_t hi = lo + r1_bio->sectors;
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ /* collision happened */
+ if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+ ret = -EBUSY;
+ else {
+ si->start = lo;
+ si->last = hi;
+ raid1_rb_insert(si, &serial->serial_rb);
}
-
- if (!ret) {
- wi->lo = lo;
- wi->hi = hi;
- list_add(&wi->list, &rdev->wb_list);
- } else
- mempool_free(wi, mddev->wb_info_pool);
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
return ret;
}
-static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ struct mddev *mddev = rdev->mddev;
+ struct serial_info *si;
+ int idx = sector_to_idx(r1_bio->sector);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ if (WARN_ON(!mddev->serial_info_pool))
+ return;
+ si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+ wait_event(serial->serial_io_wait,
+ check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
- struct wb_info *wi;
+ struct serial_info *si;
unsigned long flags;
int found = 0;
struct mddev *mddev = rdev->mddev;
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(wi, &rdev->wb_list, list)
- if (hi == wi->hi && lo == wi->lo) {
- list_del(&wi->list);
- mempool_free(wi, mddev->wb_info_pool);
+ int idx = sector_to_idx(lo);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+ si; si = raid1_rb_iter_next(si, lo, hi)) {
+ if (si->start == lo && si->last == hi) {
+ raid1_rb_remove(si, &serial->serial_rb);
+ mempool_free(si, mddev->serial_info_pool);
found = 1;
break;
}
-
+ }
if (!found)
- WARN(1, "The write behind IO is not recorded\n");
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
- wake_up(&rdev->wb_io_wait);
+ WARN(1, "The write IO is not recorded for serialization\n");
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
+ wake_up(&serial->serial_io_wait);
}
/*
@@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio)
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
bool discard_error;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
@@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- remove_wb(rdev, lo, hi);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ remove_serial(rdev, lo, hi);
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio)
call_bio_endio(r1_bio);
}
}
- }
+ } else if (rdev->mddev->serialize_policy)
+ remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
rdev_dec_pending(rdev, conf->mddev);
@@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (i = 0; i < disks; i++) {
struct bio *mbio = NULL;
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (!r1_bio->bios[i])
continue;
@@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
- struct md_rdev *rdev = conf->mirrors[i].rdev;
-
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- wait_event(rdev->wb_io_wait,
- check_and_add_wb(rdev, lo, hi) == 0);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- }
+ } else if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
r1_bio->bios[i] = mbio;
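
The INTERVAL_TREE_DEFINE() above generates raid1_rb_insert(), raid1_rb_remove(), raid1_rb_iter_first() and raid1_rb_iter_next() over struct serial_info keyed by [start, last]. Collision detection then reduces to an overlap query; a sketch of that use (not raid1 code, hypothetical helper name):

static bool demo_range_busy(struct rb_root_cached *root,
			    sector_t lo, sector_t hi)
{
	/* non-NULL means some in-flight write overlaps [lo, hi] */
	return raid1_rb_iter_first(root, lo, hi) != NULL;
}
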
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d4d3b67ffbba..ba00e9877f02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
static int alloc_thread_groups(struct r5conf *conf, int cnt,
int *group_cnt,
- int *worker_cnt_per_group,
struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
@@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
if (old_groups)
flush_workqueue(raid5_wq);
- err = alloc_thread_groups(conf, new,
- &group_cnt, &worker_cnt_per_group,
- &new_groups);
+ err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
if (!err) {
spin_lock_irq(&conf->device_lock);
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = new;
conf->worker_groups = new_groups;
spin_unlock_irq(&conf->device_lock);
@@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt,
- int *group_cnt,
- int *worker_cnt_per_group,
+static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
struct r5worker_group **worker_groups)
{
int i, j, k;
ssize_t size;
struct r5worker *workers;
- *worker_cnt_per_group = cnt;
if (cnt == 0) {
*group_cnt = 0;
*worker_groups = NULL;
@@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct disk_info *disk;
char pers_name[6];
int i;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
struct r5worker_group *new_group;
int ret;
@@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < PENDING_IO_MAX; i++)
list_add(&conf->pending_data[i].sibling, &conf->free_list);
/* Don't enable multi-threading by default*/
- if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
- &new_group)) {
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = 0;
conf->worker_groups = new_group;
} else
goto abort;
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 04d51ca63223..4c8c96a35282 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (nums[i-1] + 1 != nums[i])
goto fail_map;
buf->vaddr = (__force void *)
- ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+ ioremap(__pfn_to_phys(nums[0]), size + offset);
} else {
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
PAGE_KERNEL);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index fd47bd07ffd8..2f1eeeb6e7c7 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* map io memory */
CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
- cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
+ cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 3f3f40ea890b..1f79700a6307 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1042,7 +1042,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* map io memory */
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
- itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
+ itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
@@ -1056,7 +1056,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if (itv->has_cx23415) {
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
- itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
+ itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
@@ -1075,7 +1075,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
- ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 95a56cce9b65..1daf9e07cad7 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -37,7 +37,7 @@
#include <linux/ivtvfb.h>
#ifdef CONFIG_X86_64
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
/* card parameters */
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index f299baf7cbe0..e06d113dfe96 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 2fc6c9c38f9c..c6378c4e0074 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index e2e7ab7b7f45..b49378b18e5d 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device *pdev)
status = -EBUSY;
goto fail_nobase_res;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
status = -ENOMEM;
goto fail_base_iomap;
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a99caac59f44..1ac0c70a5981 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
if (cec->tegra_cec_irq <= 0)
return -EBUSY;
- cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ cec->cec_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!cec->cec_base) {
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f9ac22413000..1074b882c57c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -100,19 +100,19 @@ struct buflist {
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/*
* Private function calls.
*/
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
- return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
- return mptctl_gettargetinfo(arg);
+ return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
- return mptctl_readtest(arg);
+ return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
- return mptctl_eventquery(arg);
+ return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
- return mptctl_eventenable(arg);
+ return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
- return mptctl_eventreport(arg);
+ return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
- return mptctl_replace_fw(arg);
+ return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
if (cmd == MPTFWDOWNLOAD)
- ret = mptctl_fw_download(arg);
+ ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
- ret = mptctl_mpt_command(arg);
+ ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
- ret = mptctl_do_reset(arg);
+ ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
- ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
- ret = mptctl_hp_targetinfo(arg);
+ ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
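
The mptctl refactor follows one pattern throughout: the adapter is resolved and validated once in __mptctl_ioctl(), and the MPT_ADAPTER pointer is passed down, so no handler re-fetches the ioc number from the user buffer or calls mpt_verify_adapter() again. A schematic sketch of the resulting handler shape (demo_handler is hypothetical):

static int demo_handler(MPT_ADAPTER *iocp, unsigned long arg)
{
	/* iocp was validated by the caller; use it directly */
	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "demo_handler called.\n",
				iocp->name));
	return 0;
}
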
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
- MPT_ADAPTER *iocp;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
return -EFAULT;
}
- if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
- __FILE__, __LINE__, krinfo.hdr.iocnum);
- return -ENODEV; /* (-6) No such device or address */
- }
-
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
return -EFAULT;
}
- return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+ return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *iocp;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
- if (mpt_verify_adapter(ioc, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
- ioc);
- return -ENODEV; /* (-6) No such device or address */
- } else {
-
- /* Valid device. Get a message frame and construct the FW download message.
- */
- if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
- return -EAGAIN;
- }
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
- dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
- iocp->name, ioc));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
* -ENODEV if no such device/adapter
*/
static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
- int iocnum;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
return PTR_ERR(karg);
}
- if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- kfree(karg);
- return -ENODEV;
- }
-
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
- MPT_ADAPTER *ioc;
VirtDevice *vdevice;
char *pmem;
int *pdata;
- int iocnum;
int numDevices = 0;
int lun;
int maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
/* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
* -ENOMEM if memory allocation error
*/
static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int rc;
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
- rc = mptctl_do_mpt_command (karg, &uarg->MF);
+ rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
* -EPERM if SCSI I/O and target is untagged
*/
static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
- int iocnum, flagsLength;
+ int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ done_free_mem:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
- int iocnum;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
@@ -2659,15 +2554,13 @@ retry_wait:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
- MPT_ADAPTER *ioc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
- int iocnum;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
@@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
- ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+ ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
@@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
/* Pass new structure to do_mpt_command
*/
- ret = mptctl_do_mpt_command (karg, &uarg->MF);
+ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
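All of the mptctl hunks above are one refactor: the per-handler iocnum lookup through mpt_verify_adapter() moves to a single point before dispatch, and each handler now receives an already-verified MPT_ADAPTER pointer. A minimal sketch of the resulting dispatcher shape (the switch arms and local names are illustrative, not the driver's exact code):

    /* Sketch: resolve the adapter once, then hand the pointer down. */
    static long mptctl_ioctl_sketch(struct file *file, unsigned int cmd,
                                    unsigned long arg)
    {
            struct mpt_ioctl_header __user *uhdr = (void __user *) arg;
            MPT_ADAPTER *ioc = NULL;
            int iocnum;

            if (get_user(iocnum, &uhdr->iocnum))
                    return -EFAULT;
            if (mpt_verify_adapter(iocnum, &ioc) < 0 || ioc == NULL)
                    return -ENODEV;         /* one lookup, one failure path */

            switch (cmd) {
            case MPTEVENTQUERY:
                    return mptctl_eventquery(ioc, arg);
            case MPTEVENTENABLE:
                    return mptctl_eventenable(ioc, arg);
            default:
                    return -ENOIOCTLCMD;
            }
    }

Besides deleting the copy-pasted "ioc%d not found" blocks, this guarantees each handler operates on the adapter the dispatcher validated.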
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index ebc00d47abf5..7d3784aa20e5 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -552,7 +552,7 @@ mpt_lan_close(struct net_device *dev)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
-mpt_lan_tx_timeout(struct net_device *dev)
+mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
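The added txqueue parameter tracks the netdev core change that makes .ndo_tx_timeout report which TX queue stalled; single-queue drivers such as this one can simply ignore it. A handler under the new prototype looks like this (the name is illustrative):

    static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            /* txqueue is the index of the queue the stack found stuck;
             * single-queue hardware only ever sees 0. */
            netdev_warn(dev, "tx timeout on queue %u\n", txqueue);
            dev->stats.tx_errors++;
    }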
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index fd7b2167103d..06038b325b02 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
- pcr->remap_addr = ioremap_nocache(base, len);
+ pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
goto free_handle;
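This and the other ioremap_nocache() conversions below are mechanical: ioremap_nocache() had long been an alias of ioremap(), which already returns an uncached mapping, and the alias is being removed. In sketch form (assuming a PCI BAR, as here):

    /* Sketch: the replacement maps the BAR uncached, exactly as
     * ioremap_nocache() did; unmap with iounmap() as before. */
    static void __iomem *map_bar(struct pci_dev *pdev, int bar)
    {
            return ioremap(pci_resource_start(pdev, bar),
                           pci_resource_len(pdev, bar));
    }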
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 6d27ccfe0680..3c2d405bc79b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
- device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
- return device_add(&cdev->cdev);
+ return 0;
}
}
return -ENODEV;
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a4fdad04809a..de87693cf557 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
void lkdtm_UNSET_SMEP(void)
{
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;
@@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void)
native_write_cr4(cr4);
}
#else
- pr_err("FAIL: this test is x86_64-only\n");
+ pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void)
{
+#ifdef CONFIG_X86_32
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void)
asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
"r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
- panic("tried to double fault but didn't die\n");
-}
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
#endif
+}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index c25fd40f3bd0..fcd999f50d14 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
"failed to setup interrupts for %d\n", msg->src.node);
goto interrupt_setup_error;
}
- newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+ newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
if (!newdev->mmio.va) {
dev_err(&scifdev->sdev->dev,
"failed to map mmio for %d\n", msg->src.node);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 359c5bab45ac..063e4419cd7e 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
}
drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
drv_data->pti_ioaddr =
- ioremap_nocache((u32)drv_data->aperture_base,
+ ioremap((u32)drv_data->aperture_base,
APERTURE_LEN);
if (!drv_data->pti_ioaddr) {
retval = -ENOMEM;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index f7d610a22347..ada94e6a3c91 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -496,7 +496,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Deal with transmit timeouts coming from the network layer.
*/
static void
-xpnet_dev_tx_timeout(struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
dev->stats.tx_errors++;
}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 5e6be1527571..b837e7eba5f7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 95b41c0891d0..663d87924e5e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, card->erase_arg);
@@ -1149,7 +1149,7 @@ retry:
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
@@ -1167,7 +1167,7 @@ retry:
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
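The INAND_CMD38_ARG writes are plain EXT_CSD byte writes issued through CMD6, so passing 0 ("unspecified") made __mmc_switch() fall back to a blanket worst-case timeout; the card's own advertised GENERIC_CMD6_TIME is the value the spec intends. For reference, that field is decoded from the EXT_CSD in 10 ms units, roughly as follows (field index per the EXT_CSD layout; the numbers are illustrative):

    /* EXT_CSD[248] is GENERIC_CMD6_TIME in units of 10 ms, so a raw
     * value of 10 yields a 100 ms switch timeout. */
    card->ext_csd.generic_cmd6_time = 10 * ext_csd[EXT_CSD_GENERIC_CMD6_TIME];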
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index abf8f5eb0a1c..aa54d359dab7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work)
}
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ unsigned int freq = freqs[i];
+ if (freq > host->f_max) {
+ if (i + 1 < ARRAY_SIZE(freqs))
+ continue;
+ freq = host->f_max;
+ }
+ if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
break;
if (freqs[i] <= host->f_min)
break;
@@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- host->f_init = max(freqs[0], host->f_min);
+ host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
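The rescan loop steps down a fixed frequency table, previously clamped only from below by f_min, so a host with f_max below the first entry could be asked for a rate it cannot produce. A short trace of the new logic, assuming the usual table { 400000, 300000, 200000, 100000 } with f_max = 150000 and f_min = 100000:

    /* i=0: 400000 > f_max, later entries remain -> skip
     * i=1: 300000 > f_max, later entries remain -> skip
     * i=2: 200000 > f_max, later entries remain -> skip
     * i=3: 100000 <= f_max -> try max(100000, f_min) = 100 kHz
     *
     * Had the last entry also exceeded f_max, freq would have been
     * clamped to f_max instead of overshooting the host's ceiling;
     * mmc_start_host() applies the same min()/max() clamp to f_init. */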
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 105b7a7c0251..c8768726d925 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host)
struct device *dev = host->parent;
u32 bus_width, drv_type, cd_debounce_delay_ms;
int ret;
- bool cd_cap_invert, cd_gpio_invert = false;
- bool ro_cap_invert, ro_gpio_invert = false;
if (!dev || !dev_fwnode(dev))
return 0;
@@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
+
if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ if (device_property_read_bool(dev, "cd-inverted"))
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
if (device_property_read_u32(dev, "cd-debounce-delay-ms",
&cd_debounce_delay_ms))
@@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, false,
- cd_debounce_delay_ms * 1000,
- &cd_gpio_invert);
+ cd_debounce_delay_ms * 1000);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
-
- /*
- * There are two ways to flag that the CD line is inverted:
- * through the cd-inverted flag and by the GPIO line itself
- * being inverted from the GPIO subsystem. This is a leftover
- * from the times when the GPIO subsystem did not make it
- * possible to flag a line as inverted.
- *
- * If the capability on the host AND the GPIO line are
- * both inverted, the end result is that the CD line is
- * not inverted.
- */
- if (cd_cap_invert ^ cd_gpio_invert)
- host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
}
/* Parse Write Protection */
- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+ if (device_property_read_bool(dev, "wp-inverted"))
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
@@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
- /* See the comment on CD inversion above */
- if (ro_cap_invert ^ ro_gpio_invert)
- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 09113b9ad679..da425ee2d9bf 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,7 +19,9 @@
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
/*
* In cases when not allowed to poll by using CMD13 or because we aren't
* capable of polling by using ->card_busy(), then rely on waiting the
@@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_retune_hold(host);
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
+
/*
- * If the cmd timeout and the max_busy_timeout of the host are both
- * specified, let's validate them. A failure means we need to prevent
- * the host from doing hw busy detection, which is done by converting
- * to a R1 response instead of a R1B.
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection by converting to an R1 response
+ * instead of an R1B.
*/
- if (timeout_ms && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
@@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- /*
- * A busy_timeout of zero means the host can decide to use
- * whatever value it finds suitable.
- */
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card)
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card)
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
+ EXT_CSD_FLUSH_CACHE, 1,
+ MMC_CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
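The net effect is that __mmc_switch() now always works with a concrete timeout: BKOPS and cache flush pass purpose-specific values, and anything still passing 0 gets the card's generic CMD6 time plus a warning. Condensed, the decision reads:

    /* Sketch of the post-change timeout/response selection. */
    if (!timeout_ms)
            timeout_ms = card->ext_csd.generic_cmd6_time;  /* warned fallback */

    if (host->max_busy_timeout && timeout_ms > host->max_busy_timeout)
            use_r1b_resp = false;   /* too long for HW busy detection:
                                     * use R1 and poll with CMD13 */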
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index da2596c5fa28..05e907451df9 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -19,7 +19,6 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char *cd_label;
@@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host)
return -ENOSYS;
cansleep = gpiod_cansleep(ctx->cd_gpio);
- if (ctx->override_cd_active_level) {
- int value = cansleep ?
- gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
- gpiod_get_raw_value(ctx->cd_gpio);
- return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
- }
-
return cansleep ?
gpiod_get_value_cansleep(ctx->cd_gpio) :
gpiod_get_value(ctx->cd_gpio);
@@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
*
* Note that this must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
@@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
ctx->cd_debounce_delay_ms = debounce / 1000;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ /* override forces default (active-low) polarity ... */
+ if (override_active_level && !gpiod_is_active_low(desc))
+ gpiod_toggle_active_low(desc);
+
+ /* ... or active-high */
+ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
- ctx->override_cd_active_level = override_active_level;
ctx->cd_gpio = desc;
return 0;
@@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int idx, unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return ret;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
ctx->ro_gpio = desc;
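gpiod_toggle_active_low(), a gpiolib helper added alongside this series, flips a descriptor's active-low flag in place, which is what lets the MMC core translate its legacy polarity caps into plain logical reads. Used on its own, the pattern looks roughly like:

    desc = devm_gpiod_get_index(dev, "wp", 0, GPIOD_IN);
    if (IS_ERR(desc))
            return PTR_ERR(desc);

    /* Fold the legacy cap into the descriptor's polarity... */
    if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
            gpiod_toggle_active_low(desc);

    /* ...so the logical read needs no further correction. */
    ro = gpiod_get_value_cansleep(desc);    /* 1 == write-protected */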
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d06b2dfe3c95..3a5089f0332c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -501,6 +501,7 @@ config MMC_SDHCI_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC
depends on MMC_SDHCI_PLTFM
+ select MMC_CQHCI
default y
help
This selects support for the SDIO/SD/MMC Host Controller on
@@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's DRA7 SOCs. The controller supports
@@ -1040,3 +1043,6 @@ config MMC_OWL
help
This selects support for the SD/MMC Host Controller on
Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+ bool
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 6f065bb5c55a..aeaaa5314924 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bc8aeb47a7b4..8823680ca42c 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
goto out2;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
goto out3;
- }
- host->irq = r->start;
mmc->ops = &au1xmmc_ops;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 99f61fd2a658..c3d949847cbd 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev)
host->dma_chan = NULL;
host->dma_desc = NULL;
- host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+ host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(host->dma_chan_rxtx)) {
+ ret = PTR_ERR(host->dma_chan_rxtx);
+ host->dma_chan_rxtx = NULL;
+
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
+ /* Ignore errors to fall back to PIO mode */
+ }
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
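This is the first of many dma_request_slave_channel() to dma_request_chan() conversions in this series. The older helper returns NULL for every failure, which silently swallows -EPROBE_DEFER; the ERR_PTR-returning variant lets a driver defer probing when the DMA provider is not bound yet and still fall back to PIO on genuine errors. The recurring pattern, in sketch form:

    chan = dma_request_chan(dev, "rx-tx");
    if (IS_ERR(chan)) {
            ret = PTR_ERR(chan);
            chan = NULL;
            if (ret == -EPROBE_DEFER)
                    return ret;     /* provider not ready: retry probe later */
            /* any other error: carry on in PIO mode */
    }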
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index eee08d81b242..76013bbbcff3 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
return ret;
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!host->base)
- return -EINVAL;
+ if (!host->base) {
+ ret = -EINVAL;
+ goto error;
+ }
/* On ThunderX these are identical */
host->dma_base = host->base;
@@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->reg_off_dma = 0x160;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
- return PTR_ERR(host->clk);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error;
+ }
ret = clk_prepare_enable(host->clk);
if (ret)
- return ret;
+ goto error;
host->sys_freq = clk_get_rate(host->clk);
spin_lock_init(&host->irq_handler_lock);
@@ -157,6 +161,7 @@ error:
}
}
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
return ret;
}
@@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ebfaeb33bc8c..f01fecd75833 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
mmc->caps |= pdata->caps;
/* Register a cd gpio, if there is not one, enable polling */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fc9d4d000f97..bc5278ab5707 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host)
if (!host->dms)
return -ENOMEM;
- host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
- if (!host->dms->ch) {
+ host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+ if (IS_ERR(host->dms->ch)) {
+ int ret = PTR_ERR(host->dms->ch);
+
dev_err(host->dev, "Failed to get external DMA channel.\n");
kfree(host->dms);
host->dms = NULL;
- return -ENXIO;
+ return ret;
}
return 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 78383f60a3dc..fbae87d1f017 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev)
static int jz4740_mmc_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ return pinctrl_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
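pinctrl_select_default_state(), introduced by this series in the pinctrl core, looks up and applies PINCTRL_STATE_DEFAULT for a device on demand, so drivers no longer need to cache a pins_default handle from pinctrl_lookup_state(). A typical resume path then shrinks to:

    static int example_runtime_resume(struct device *dev)
    {
            /* Re-apply the "default" pin state after idle/sleep states
             * may have been selected; no cached state pointer needed. */
            return pinctrl_select_default_state(dev);
    }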
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e712315c7e8d..35400cf2a2e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -161,7 +161,6 @@ struct meson_host {
bool dram_access_quirk;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_clk_gate;
unsigned int bounce_buf_size;
@@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
u32 cfg;
if (host->pins_clk_gate)
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(host->dev);
/* Make sure the clock is not stopped in the controller */
cfg = readl(host->regs + SD_EMMC_CFG);
@@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
- goto free_host;
- }
-
host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
"clk-gate");
if (IS_ERR(host->pins_clk_gate)) {
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ba9a63db73da..8b038e7b2cd3 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
- struct resource *res;
int ret, irq;
u32 conf;
@@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(host->controller_dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto error_free_mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74c6cfbf9172..951f76dc1ddd 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases, such as when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but will be logically
+ * inverted by gpiolib, so to make sure the line is driven high we
+ * toggle the default with an XOR as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
@@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
@@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
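The |=/&= to ^= change above is subtle enough to spell out: XOR against the current mode is self-inverse, so the second toggle restores exactly what probe-time setup (possibly gpiolib's own chip-select inversion) had established, whereas forcing the bit on and then off destroyed any SPI_CS_HIGH that was legitimately set:

    /* mode ^= SPI_CS_HIGH twice is the identity:
     *   M -> M ^ CS_HIGH -> (M ^ CS_HIGH) ^ CS_HIGH == M
     * while the old  mode |= CS_HIGH; ... mode &= ~CS_HIGH;  always
     * ends with the bit cleared, regardless of its initial value. */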
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 40e72c30ea84..e9ffce8d41ea 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
.stm32_idmabsize_mask = GENMASK(12, 5),
.busy_timeout = true,
.busy_detect = true,
@@ -284,6 +289,7 @@ static struct variant_data variant_qcom = {
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
@@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- host->ops->dma_start(host, &datactrl);
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
@@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host)
host->dma_priv = dmae;
- dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "rx");
- dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "tx");
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
/*
* If only an RX channel is specified, the driver will
@@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * However, some power-of-two blocks (such as 64 bytes) are sent
+ * regularly during SDIO traffic and those work fine, so for these we
+ * enable DMA transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
host->dma_in_progress = true;
- dmaengine_submit(dmae->desc_current);
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
@@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
return;
}
}
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
} else if (!host->variant->datactrl_first &&
@@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
- int ret = 0;
spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
@@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
- ret = 1;
} while (status);
spin_unlock(&host->lock);
- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a datatimeout for an R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pinctrl_select_state(host->pinctrl, host->pins_opendrain);
else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(mmc_dev(mmc));
}
/*
@@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev,
goto host_free;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- dev_err(mmc_dev(mmc), "Can't select default pins\n");
- ret = PTR_ERR(host->pins_default);
- goto host_free;
- }
-
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
if (IS_ERR(host->pins_opendrain)) {
@@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev,
* silently if these do not exist
*/
if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
@@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev)
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
}
return 0;
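The MMCI interrupt path is restructured around host->irq_action: the hard handler returns IRQ_WAKE_THREAD when a busy-signal datatimeout means the data path state machine is wedged, and the threaded handler performs the reset before completing the request. Stripped to the mechanism:

    ret = devm_request_threaded_irq(&dev->dev, irq, hard_handler,
                                    thread_fn, IRQF_SHARED,
                                    "mmci (cmd)", host);

    /* hard_handler stores IRQ_HANDLED or IRQ_WAKE_THREAD in
     * host->irq_action and returns it; IRQ_WAKE_THREAD makes genirq run
     * thread_fn in process context, where the reset_control assert/
     * deassert pair and register restore can happen before
     * mmci_request_end() finishes the request. */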
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 158e1231aa23..ea6a0b5779d4 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -279,7 +279,11 @@ struct mmci_host;
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block size is accepted by the
+ * hardware, such as with some SDIO traffic that sends
+ * odd packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
@@ -326,6 +330,8 @@ struct variant_data {
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
+ u8 datactrl_any_blocksz:1;
+ u8 dma_power_of_2:1;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
@@ -404,7 +410,6 @@ struct mmci_host {
struct mmci_host_ops *ops;
struct variant_data *variant;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_opendrain;
u8 hw_designer;
@@ -412,6 +417,7 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
+ u32 irq_action;
/* pio stuff */
struct sg_mapping_iter sg_miter;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 010fe29a4888..7726dcf48f2c 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (ret)
goto host_free;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto host_free;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 74a0a7fbbf7f..203b61712601 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
- struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 011b59a3602e..b3d654c688e5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev)
mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
if (!host->pdata) {
- host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(host->dma)) {
+ if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk_put;
+ }
+
+ /* Ignore errors to fall back to PIO mode */
+ host->dma = NULL;
+ }
} else {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4031217d21c3..d82674aed447 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_clk_disable;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 767e964ca5a2..a379c45b985c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
ret = PTR_ERR(p);
goto err_free_irq;
}
- if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
- dev_info(host->dev, "missing default pinctrl state\n");
- devm_pinctrl_put(p);
- ret = -EINVAL;
- goto err_free_irq;
- }
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
dev_info(host->dev, "missing idle pinctrl state\n");
@@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
/* irq lost, if pinmux incorrect */
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
} else {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 771e3d00f1bb..01ffe51f413d 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
- if (!owl_host->dma) {
+ owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+ if (IS_ERR(owl_host->dma)) {
dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
- ret = -ENXIO;
+ ret = PTR_ERR(owl_host->dma);
goto err_free_host;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 024acc1b0a2e..3a9333475a2b 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
- if (host->dma_chan_rx == NULL) {
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx)) {
dev_err(dev, "unable to request rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
goto out;
}
- host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
- if (host->dma_chan_tx == NULL) {
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx)) {
dev_err(dev, "unable to request tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
goto out;
}
@@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev)
}
/* FIXME: should we pass detection delay to debounce? */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_cd\n");
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ if (!host->pdata->gpio_card_ro_invert)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
}
- if (!ret) {
+ if (!ret)
host->use_ro_gpio = true;
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
- }
if (host->pdata->init)
host->pdata->init(dev, pxamci_detect_irq, mmc);
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index c0504aa90857..f524251d5113 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -14,8 +14,8 @@
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
- u32 tap; /* sampling clock position for SDR104 */
- u32 tap_hs400; /* sampling clock position for HS400 */
+ u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */
+ u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
struct renesas_sdhi_of_data {
@@ -33,6 +33,11 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct renesas_sdhi_quirks {
+ bool hs400_disabled;
+ bool hs400_4taps;
+};
+
struct tmio_mmc_dma {
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
@@ -46,6 +51,7 @@ struct renesas_sdhi {
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ const struct renesas_sdhi_quirks *quirks;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 234551a68739..35cb24cd45b4 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -46,11 +46,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-struct renesas_sdhi_quirks {
- bool hs400_disabled;
- bool hs400_4taps;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
- if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+ if (priv->quirks && priv->quirks->hs400_4taps)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
host->tap_set / 2);
@@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
};
static const struct soc_device_attribute sdhi_quirks_match[] = {
+ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ },
};
@@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!priv)
return -ENOMEM;
+ priv->quirks = quirks;
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
@@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (quirks && quirks->hs400_disabled)
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
- if (quirks && quirks->hs400_4taps)
- mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
/* For some SoC, we disable internal WP. GPIO may override this */
if (mmc_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
@@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
- priv->scc_tappos_hs400 = taps->tap_hs400;
+ priv->scc_tappos_hs400 = use_4tap ?
+ taps->tap_hs400_4tap :
+ taps->tap;
hit = true;
break;
}
}
if (!hit)
- dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
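Replacing the TMIO_MMC_HAVE_4TAP_HS400 flag with a quirks pointer keeps all per-SoC, per-ES-revision behaviour in one soc_device_match() table (revision strings are globs). Consuming such a table is just:

    static const struct soc_device_attribute quirk_table[] = {
            { .soc_id = "r8a7795", .revision = "ES1.*",
              .data = &sdhi_quirks_4tap_nohs400 },
            { /* sentinel */ }
    };

    const struct soc_device_attribute *attr = soc_device_match(quirk_table);
    const struct renesas_sdhi_quirks *quirks = attr ? attr->data : NULL;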
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 18839a10594c..47ac53e91241 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
- .tap_hs400 = 0x00000704,
+ .tap_hs400_4tap = 0x00000100,
},
};
@@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
* Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
* implementation as others may use a different implementation.
*/
-static const struct soc_device_attribute soc_whitelist[] = {
- /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
{ .soc_id = "r7s9210",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
{ .soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
{ .soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- /* generic ones */
- { .soc_id = "r8a774a1" },
- { .soc_id = "r8a774b1" },
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77470" },
- { .soc_id = "r8a7795" },
- { .soc_id = "r8a7796" },
- { .soc_id = "r8a77965" },
- { .soc_id = "r8a77970" },
- { .soc_id = "r8a77980" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
{ /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+ const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
struct device *dev = &pdev->dev;
- if (!soc)
- return -ENODEV;
-
- global_flags |= (unsigned long)soc->data;
+ if (soc)
+ global_flags |= (unsigned long)soc->data;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index bce9c33bc4b5..1e616ae56b13 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
/* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
ret);
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 105e73d4a3b9..9651dca6863e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
}
- host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ host->ioaddr = devm_ioremap(dev, iomem->start,
resource_size(iomem));
if (host->ioaddr == NULL) {
err = -ENOMEM;
@@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 73bb440aaf93..ad01f6451a95 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -9,29 +9,236 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+ bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+ void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ struct sdhci_ops *ops;
+ unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ u32 reg;
+
+ dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+ __func__);
+ reg = readl(host->ioaddr + SDHCI_VENDOR);
+ if (ios->enhanced_strobe)
+ reg |= SDHCI_VENDOR_ENHANCED_STRB;
+ else
+ reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+ writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+ __func__, timing);
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_SD_HS ||
+ timing == MMC_TIMING_MMC_HS ||
+ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+ .enable = sdhci_brcmstb_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+ .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+ BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
+static struct brcmstb_match_priv match_priv_7445 = {
+ .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+ { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+ { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ {},
+};
+
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ struct sdhci_brcmstb_priv *priv)
+{
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!priv->has_cqe)
+ return sdhci_add_host(host);
+
+ dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(mmc_dev(host->mmc),
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ }
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
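/*
 * The function above follows the split bring-up that sdhci.c exposes
 * for hosts with an extra engine. A minimal sketch of the pairing that
 * matters (the function name and locals cq_host/dma64 are hypothetical,
 * and everything but the error-unwind pairing is elided):
 */
static int example_add_host_with_cqe(struct sdhci_host *host,
	struct cqhci_host *cq_host, bool dma64)
{
	int ret;

	ret = sdhci_setup_host(host);	/* allocate host resources */
	if (ret)
		return ret;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (!ret)
		ret = __sdhci_add_host(host);	/* register with the core */

	if (ret)
		sdhci_cleanup_host(host);	/* undo sdhci_setup_host() */
	return ret;
}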
+
static int sdhci_brcmstb_probe(struct platform_device *pdev)
{
- struct sdhci_host *host;
+ const struct brcmstb_match_priv *match_priv;
+ struct sdhci_pltfm_data brcmstb_pdata;
struct sdhci_pltfm_host *pltfm_host;
+ const struct of_device_id *match;
+ struct sdhci_brcmstb_priv *priv;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ bool has_cqe = false;
struct clk *clk;
int res;
+ match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+ match_priv = match->data;
+
+ dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_err(&pdev->dev, "Clock not found in Device Tree\n");
clk = NULL;
}
@@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (res)
return res;
- host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ has_cqe = true;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
+ brcmstb_pdata.ops = match_priv->ops;
+ host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+ sizeof(struct sdhci_brcmstb_priv));
if (IS_ERR(host)) {
res = PTR_ERR(host);
goto err_clk;
}
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->has_cqe = has_cqe;
+
+ /* Map in the non-standard CFG registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->cfg_regs)) {
+ res = PTR_ERR(priv->cfg_regs);
+ goto err;
+ }
+
sdhci_get_of_property(pdev);
res = mmc_of_parse(host->mmc);
if (res)
goto err;
/*
+ * If the chip supports enhanced strobe and it is enabled, add the
+ * callback.
+ */
+ if (match_priv->hs400es &&
+ (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+ host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
+ /*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_DDR50);
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- res = sdhci_add_host(host);
+ res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
- pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
return res;
@@ -79,11 +314,15 @@ err_clk:
return res;
}
-static const struct of_device_id sdhci_brcm_of_match[] = {
- { .compatible = "brcm,bcm7425-sdhci" },
- { .compatible = "brcm,bcm7445-sdhci" },
- {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sdhci_pltfm_unregister(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shut down\n");
+}
+
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
@@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
},
.probe = sdhci_brcmstb_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_brcmstb_shutdown,
};
module_platform_driver(sdhci_brcmstb_driver);
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index ae0ec27dd7cc..5827d3751b81 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
return 0;
}
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1c988d6a2433..382f25b2fa45 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_100mhz;
struct pinctrl_state *pins_200mhz;
const struct esdhc_soc_data *socdata;
@@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
if (IS_ERR(imx_data->pinctrl) ||
- IS_ERR(imx_data->pins_default) ||
IS_ERR(imx_data->pins_100mhz) ||
IS_ERR(imx_data->pins_200mhz))
return -EINVAL;
@@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
break;
default:
/* back to default state for other legacy timing */
- pinctrl = imx_data->pins_default;
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
@@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
- if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+ if (esdhc_is_usdhc(imx_data)) {
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
return err;
}
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
@@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ahb_clk;
}
- imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(imx_data->pins_default))
- dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index a1aa21b9ae1c..92f30a1db435 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ret = 0;
struct f_sdhost_priv *priv;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: no irq specified\n", __func__);
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
@@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
host->ops = &sdhci_milbeaut_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3d0bb5e2e09b..c3a160c18047 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -15,6 +15,7 @@
#include <linux/regulator/consumer.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
@@ -122,6 +123,10 @@
#define msm_host_writel(msm_host, val, host, offset) \
msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1 0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
@@ -1567,6 +1572,127 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*****************************************************************************\
+ * *
+ * MSM Command Queue Engine (CQE) *
+ * *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+ return 0;
+}
+
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * When CQE is halted, the legacy SDHCI path operates only
+ * on 16-byte descriptors in 64-bit DMA mode.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 16;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * During CQE command transfers, the command complete bit gets latched,
+ * so software should clear the command complete interrupt status when
+ * CQE is halted or disabled. Otherwise an unexpected SDHCI legacy
+ * interrupt gets triggered when CQE is halted/disabled.
+ */
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ u32 cqcfg;
+ int ret;
+
+ /*
+ * When CQE is halted, the SDHC operates only on 16-byte ADMA
+ * descriptors, so ensure the ADMA table is allocated for 16-byte
+ * descriptors.
+ */
+ if (host->caps & SDHCI_CAN_64BIT)
+ host->alloc_desc_sz = 16;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = cqhci_pltfm_init(pdev);
+ if (IS_ERR(cq_host)) {
+ ret = PTR_ERR(cq_host);
+ dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host->ops = &sdhci_msm_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ goto cleanup;
+ }
+
+ /* Disable CQE reset triggered by the CQE enable signal */
+ cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+ cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+ cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+ /*
+ * The SDHC expects 12-byte ADMA descriptors until CQE is enabled,
+ * so limit desc_sz to 12 so that the data commands sent during card
+ * initialization (before CQE gets enabled) execute without issues.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 12;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&pdev->dev, "%s: CQE init: success\n",
+ mmc_hostname(host->mmc));
+ return ret;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
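/*
 * A summary of the descriptor-size juggling above, for 64-bit DMA
 * hosts (restating the comments in both functions, not new behavior):
 * the ADMA table is allocated at the 16-byte size (alloc_desc_sz = 16)
 * because that is the largest the host will need, desc_sz is held at
 * 12 bytes for the legacy transfers issued before CQE is enabled, and
 * sdhci_msm_cqe_disable() flips it back to 16 bytes whenever CQE is
 * halted.
 */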
+
/*
* Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions,
@@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
+ .irq = sdhci_msm_cqe_irq,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
struct clk *clk;
int ret;
u16 host_version, core_minor;
@@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u8 core_major;
const struct sdhci_msm_offset *msm_offset;
const struct sdhci_msm_variant_info *var_info;
+ struct device_node *node = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
@@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
if (!msm_host->mci_removed) {
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
- core_memres);
-
+ msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(msm_host->core_mem)) {
ret = PTR_ERR(msm_host->core_mem);
goto clk_disable;
@@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- ret = sdhci_add_host(host);
+ if (of_property_read_bool(node, "supports-cqe"))
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+ else
+ ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
sdhci_msm_set_regulator_caps(msm_host);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5959e394b416..ab2bd314a390 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -33,7 +33,14 @@
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+struct sdhci_at91_soc_data {
+ const struct sdhci_pltfm_data *pdata;
+ bool baseclk_is_generated_internally;
+ unsigned int divider_for_baseclk;
+};
+
struct sdhci_at91_priv {
+ const struct sdhci_at91_soc_data *soc_data;
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
@@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_power = sdhci_at91_set_power,
};
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
.ops = &sdhci_at91_sama5d2_ops,
};
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = true,
+ .divider_for_baseclk = 2,
+};
+
static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
@@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
unsigned int caps0, caps1;
unsigned int clk_base, clk_mul;
- unsigned int gck_rate, real_gck_rate;
+ unsigned int gck_rate, clk_base_rate;
unsigned int preset_div;
- /*
- * The mult clock is provided by as a generated clock by the PMC
- * controller. In order to set the rate of gck, we have to get the
- * base clock rate and the clock mult from capabilities.
- */
clk_prepare_enable(priv->hclock);
caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
- clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
- clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
- gck_rate = clk_base * 1000000 * (clk_mul + 1);
- ret = clk_set_rate(priv->gck, gck_rate);
- if (ret < 0) {
- dev_err(dev, "failed to set gck");
- clk_disable_unprepare(priv->hclock);
- return ret;
- }
- /*
- * We need to check if we have the requested rate for gck because in
- * some cases this rate could be not supported. If it happens, the rate
- * is the closest one gck can provide. We have to update the value
- * of clk mul.
- */
- real_gck_rate = clk_get_rate(priv->gck);
- if (real_gck_rate != gck_rate) {
- clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
- caps1 &= (~SDHCI_CLOCK_MUL_MASK);
- caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
- SDHCI_CLOCK_MUL_MASK);
- /* Set capabilities in r/w mode. */
- writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
- host->ioaddr + SDMMC_CACR);
- writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
- /* Set capabilities in ro mode. */
- writel(0, host->ioaddr + SDMMC_CACR);
- dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
- clk_mul, real_gck_rate);
- }
+
+ gck_rate = clk_get_rate(priv->gck);
+ if (priv->soc_data->baseclk_is_generated_internally)
+ clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+ else
+ clk_base_rate = clk_get_rate(priv->mainck);
+
+ clk_base = clk_base_rate / 1000000;
+ clk_mul = gck_rate / clk_base_rate - 1;
+
+ caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+ caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+ caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+
+ dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+ clk_mul, gck_rate, clk_base_rate);
/*
* We have to set preset values because it depends on the clk_mul
@@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
* maximum sd clock value is 120 MHz instead of 208 MHz. For that
* reason, we need to use presets to support SDR104.
*/
- preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR12);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR25);
- preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR50);
- preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR104);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_DDR50);
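/*
 * Worked numbers for the calculation above (all hypothetical): on a
 * sam9x60-style setup with gck running at 240 MHz and
 * divider_for_baseclk = 2, clk_base_rate = 240 MHz / 2 = 120 MHz, so
 * clk_base = 120 and clk_mul = 240 / 120 - 1 = 1. The SDR104 preset
 * divider then becomes DIV_ROUND_UP(240000000, 120000000) - 1 = 1.
 */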
@@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- const struct sdhci_pltfm_data *soc_data;
+ const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
@@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
+ priv->soc_data = soc_data;
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
- dev_err(&pdev->dev, "failed to get baseclk\n");
- return PTR_ERR(priv->mainck);
+ if (soc_data->baseclk_is_generated_internally) {
+ priv->mainck = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ ret = PTR_ERR(priv->mainck);
+ goto sdhci_pltfm_free;
+ }
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
- return PTR_ERR(priv->hclock);
+ ret = PTR_ERR(priv->hclock);
+ goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
- return PTR_ERR(priv->gck);
+ ret = PTR_ERR(priv->gck);
+ goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 500f70a6ee42..5d8dd870bd44 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
u16 ret;
int shift = (spec_reg & 0x2) * 8;
+ if (spec_reg == SDHCI_TRANSFER_MODE)
+ return pltfm_host->xfer_mode_shadow;
+
if (spec_reg == SDHCI_HOST_VERSION)
ret = value & 0xffff;
else
@@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
- u32 val;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
ktime_t timeout;
+ u32 val, clk_en;
+
+ clk_en = ESDHC_CLOCK_SDCLKEN;
+
+ /*
+ * The IPGEN/HCKEN/PEREN bits exist on eSDHC controllers whose
+ * vendor version is 2.2 or lower.
+ */
+ if (esdhc->vendor_ver <= VENDOR_V_22)
+ clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN);
val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
+ val |= clk_en;
else
- val &= ~ESDHC_CLOCK_SDCLKEN;
+ val &= ~clk_en;
sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
- /* Wait max 20 ms */
+ /*
+ * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist there.
+ */
timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (1) {
+ while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
- if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
- udelay(10);
+ usleep_range(10, 20);
}
}
@@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- int pre_div = 1;
- int div = 1;
- int division;
+ unsigned int pre_div = 1, div = 1;
+ unsigned int clock_fixup = 0;
ktime_t timeout;
- long fixup = 0;
u32 temp;
- host->mmc->actual_clock = 0;
-
if (clock == 0) {
+ host->mmc->actual_clock = 0;
esdhc_clock_enable(host, false);
return;
}
- /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ /* Start pre_div at 2 for vendor version < 2.3. */
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /* Limit the clock to any platform-specific maximum (clk_fixup). */
if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
- esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
- fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+ clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
else if (esdhc->clk_fixup)
- fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
-
- if (fixup && clock > fixup)
- clock = fixup;
+ clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
- ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ if (clock_fixup == 0 || clock < clock_fixup)
+ clock_fixup = clock;
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ /* Calculate pre_div and div. */
+ while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
pre_div *= 2;
- while (host->max_clk / pre_div / div > clock && div < 16)
+ while (host->max_clk / pre_div / div > clock_fixup && div < 16)
div++;
+ esdhc->div_ratio = pre_div * div;
+
+ /* Quirk: limit the clock division for the HS400 200 MHz clock. */
if (esdhc->quirk_limited_clk_division &&
clock == MMC_HS200_MAX_DTR &&
(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)) {
- division = pre_div * div;
- if (division <= 4) {
+ if (esdhc->div_ratio <= 4) {
pre_div = 4;
div = 1;
- } else if (division <= 8) {
+ } else if (esdhc->div_ratio <= 8) {
pre_div = 4;
div = 2;
- } else if (division <= 12) {
+ } else if (esdhc->div_ratio <= 12) {
pre_div = 4;
div = 3;
} else {
pr_warn("%s: using unsupported clock division.\n",
mmc_hostname(host->mmc));
}
+ esdhc->div_ratio = pre_div * div;
}
+ host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
- host->mmc->actual_clock = host->max_clk / pre_div / div;
- esdhc->div_ratio = pre_div * div;
+ clock, host->mmc->actual_clock);
+
+ /* Program the clock division into the register. */
pre_div >>= 1;
div--;
+ esdhc_clock_enable(host, false);
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
+ temp &= ~ESDHC_CLOCK_MASK;
+ temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+ (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ /*
+ * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist there.
+ */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (esdhc->vendor_ver > VENDOR_V_22) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ /* Additional setting for HS400. */
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
clock == MMC_HS200_MAX_DTR) {
temp = sdhci_readl(host, ESDHC_TBCTL);
@@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
}
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
- break;
- if (timedout) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- udelay(10);
- }
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= ESDHC_CLOCK_SDCLKEN;
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ esdhc_clock_enable(host, true);
}
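/*
 * A standalone sketch of the divider search above, with hypothetical
 * numbers (max_clk = 400 MHz, target = 50 MHz, vendor version >= 2.3
 * so pre_div starts at 1):
 */
unsigned int max_clk = 400000000, target = 50000000;
unsigned int pre_div = 1, div = 1;

while (max_clk / pre_div / 16 > target && pre_div < 256)
	pre_div *= 2;	/* stays 1: 400 MHz / 1 / 16 = 25 MHz, not above 50 MHz */
while (max_clk / pre_div / div > target && div < 16)
	div++;		/* stops at 8: 400 MHz / 1 / 8 = 50 MHz */
/*
 * So div_ratio = 8 and actual_clock = 50 MHz; the register encoding
 * then stores pre_div >> 1 = 0 and div - 1 = 7.
 */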
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
@@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
+ u32 val, bus_width = 0;
+ /*
+ * Quirk: add a delay to make sure all DMA transfers have finished
+ * before the data reset.
+ */
if (esdhc->quirk_delay_before_data_reset &&
(mask & SDHCI_RESET_DATA) &&
(host->flags & SDHCI_REQ_USE_DMA))
mdelay(5);
+ /*
+ * For a data reset on eSDHC controllers whose vendor version is
+ * 2.2 or lower, save the bus-width setting first.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+ }
+
sdhci_reset(host, mask);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ /*
+ * After a data reset on eSDHC controllers whose vendor version is
+ * 2.2 or lower, restore the bus-width setting and the interrupt
+ * registers.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+ val |= bus_width;
+ sdhci_writel(host, val, ESDHC_PROCTL);
- if (mask & SDHCI_RESET_ALL) {
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ /*
+ * On a full reset, some bits have to be cleared manually on eSDHC
+ * controllers whose spec version is 3.0 or higher.
+ */
+ if ((mask & SDHCI_RESET_ALL) &&
+ (esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
+ /*
+ * Quirk: initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL]
+ * to 0.
+ */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
- { .family = "QorIQ T1023", .revision = "1.0", },
- { .family = "QorIQ T1040", .revision = "1.0", },
- { .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ LS1021A", .revision = "1.0", },
+ { .family = "QorIQ T1023", },
+ { .family = "QorIQ T1040", },
+ { .family = "QorIQ T2080", },
+ { .family = "QorIQ LS1021A", },
{ },
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
- { .family = "QorIQ LS1012A", .revision = "1.0", },
- { .family = "QorIQ LS1043A", .revision = "1.*", },
- { .family = "QorIQ LS1046A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
- { .family = "QorIQ LA1575A", .revision = "1.0", },
+ { .family = "QorIQ LS1012A", },
+ { .family = "QorIQ LS1043A", },
+ { .family = "QorIQ LS1046A", },
+ { .family = "QorIQ LS1080A", },
+ { .family = "QorIQ LS2080A", },
+ { .family = "QorIQ LA1575A", },
{ },
};
@@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u8 tbstat_15_8, tbstat_7_0;
u32 val;
- if (esdhc->quirk_tuning_erratum_type1) {
- *window_start = 5 * esdhc->div_ratio;
- *window_end = 3 * esdhc->div_ratio;
- return;
- }
-
/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
@@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);
+ *window_end = val & 0xff;
+ *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 start_ptr, end_ptr;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
- /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
- * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+ * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
- tbstat_7_0 = val & 0xff;
- tbstat_15_8 = (val >> 8) & 0xff;
- if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
@@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret)
break;
+ /* On platforms affected by the type-2 tuning erratum, tuning
+ * may report success even though the eSDHC has not tuned
+ * properly, so check the tuning window.
+ */
+ if (esdhc->quirk_tuning_erratum_type2 &&
+ !host->tuning_err) {
+ esdhc_tuning_window_ptr(host, &window_start,
+ &window_end);
+ if (abs(window_start - window_end) >
+ (4 * esdhc->div_ratio + 2))
+ host->tuning_err = -EAGAIN;
+ }
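/*
 * Worked numbers for the (4 * div_ratio + 2) threshold used here and
 * in esdhc_prepare_sw_tuning() (values hypothetical): with
 * div_ratio = 4 the threshold is 18, so a window of window_start = 40,
 * window_end = 10 gives abs(40 - 10) = 30 > 18; sw tuning preparation
 * would reprogram the window to 8 * 4 = 32 and 4 * 4 = 16, while the
 * type-2 check above instead flags host->tuning_err = -EAGAIN.
 */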
+
/* If HW tuning fails and triggers erratum,
* try workaround.
*/
@@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
* 1/2 peripheral clock.
*/
if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+ of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+ of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
esdhc->peripheral_clock = clk_get_rate(clk) / 2;
else
esdhc->peripheral_clock = clk_get_rate(clk);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 083e7e053c95..882053151a47 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -85,6 +86,7 @@
/* sdhci-omap controller flags */
#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
u32 offset;
@@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
- reg |= CON_DMA_MASTER;
+ reg &= ~CON_DMA_MASTER;
+ /* Switch to DMA slave mode when using external DMA */
+ if (!host->use_external_dma)
+ reg |= CON_DMA_MASTER;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
return 0;
@@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
sdhci_omap_start_clock(omap_host);
}
+#define MMC_TIMEOUT_US 20000 /* 20000 microseconds */
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long limit = MMC_TIMEOUT_US;
+ unsigned long i = 0;
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
mask &= ~SDHCI_RESET_DATA;
+ if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+ while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+ (i++ < limit))
+ udelay(1);
+ i = 0;
+ while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+ (i++ < limit))
+ udelay(1);
+
+ if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+ dev_err(mmc_dev(host->mmc),
+ "Timeout waiting on controller reset in %s\n",
+ __func__);
+ return;
+ }
+
sdhci_reset(host, mask);
}
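/*
 * The special reset above polls in two phases, each bounded by
 * MMC_TIMEOUT_US iterations of udelay(1), i.e. up to roughly 20 ms
 * apiece: first wait for the written reset bit to read back as set,
 * since the write takes time to latch on these SoCs, then wait for the
 * controller to clear it again once the reset actually completes.
 */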
@@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
return intmask;
}
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_ERASE)
+ sdhci_set_data_timeout_irq(host, false);
+
+ __sdhci_set_timeout(host, cmd);
+}
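/*
 * This hook leans on the two helpers exported from sdhci.c later in
 * this patch: disabling the hardware data-timeout interrupt before the
 * generic __sdhci_set_timeout() keeps a long-running erase (MMC_ERASE)
 * from being cut short by a hardware data timeout.
 */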
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
.irq = sdhci_omap_irq,
+ .set_timeout = sdhci_omap_set_timeout,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = {
.offset = 0x200,
};
+static const struct sdhci_omap_data am335_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data dra7_data = {
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
@@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = {
static const struct of_device_id omap_sdhci_match[] = {
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+ { .compatible = "ti,am335-sdhci", .data = &am335_data },
+ { .compatible = "ti,am437-sdhci", .data = &am437_data },
{},
};
MODULE_DEVICE_TABLE(of, omap_sdhci_match);
@@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
+ struct resource *regs;
match = of_match_device(omap_sdhci_match, dev);
if (!match)
@@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
}
offset = data->offset;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
sizeof(*omap_host));
if (IS_ERR(host)) {
@@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
host->ioaddr += offset;
+ host->mapbase = regs->start + offset;
mmc = host->mmc;
sdhci_get_of_property(pdev);
@@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
+ /* Switch to external DMA only if the "dmas" property is present */
+ if (of_find_property(dev->of_node, "dmas", NULL))
+ sdhci_switch_external_dma(host, true);
+
ret = sdhci_setup_host(host);
if (ret)
goto err_put_sync;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 5091e2c1c0e5..525de2454a4d 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
- slot->cd_override_level, 0, NULL);
+ slot->cd_override_level, 0);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
- 0, NULL);
+ 0);
if (ret == -EPROBE_DEFER)
goto remove;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 51e096f27388..64200c78e90d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -117,7 +117,6 @@ struct sdhci_s3c {
struct s3c_sdhci_platdata *pdata;
int cur_clk;
int ext_cd_irq;
- int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
- struct resource *res;
int ret, irq, ptr, clks;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_pdata_io_clk;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
- sc->ext_cd_gpio = -1; /* invalid gpio number */
}
drv_data = sdhci_s3c_get_driver_data(pdev);
@@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_no_busclks;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index e43143223320..f4b05dd6c20a 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* We must request the IRQ after sdhci_add_host(), as the tasklet only
* gets setup in sdhci_add_host() and we oops.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto err_request_cd;
if (!ret)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 916b5b09c3d1..b4b63089a4e2 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct resource *iomem;
struct spear_sdhci *sdhci;
struct device *dev;
int ret;
@@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev)
goto err;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
@@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev)
* It is optional to use GPIOs for sdhci card detection. If we
* find a descriptor using slot GPIO, we use it.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7bc950520fd9..403ac44a7378 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1b1c26da3fe0..63db84481dff 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -10,6 +10,7 @@
*/
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
@@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
if (enable)
host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 count;
-
- if (host->ops->set_timeout) {
- host->ops->set_timeout(host, cmd);
- } else {
- bool too_big = false;
-
- count = sdhci_calc_timeout(host, cmd, &too_big);
-
- if (too_big &&
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
- sdhci_calc_sw_timeout(host, cmd);
- sdhci_set_data_timeout_irq(host, false);
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
- sdhci_set_data_timeout_irq(host, true);
- }
-
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ bool too_big = false;
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+ if (too_big &&
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+ sdhci_calc_sw_timeout(host, cmd);
+ sdhci_set_data_timeout_irq(host, false);
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+ sdhci_set_data_timeout_irq(host, true);
}
+
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- struct mmc_data *data = cmd->data;
-
- host->data_timeout = 0;
-
- if (sdhci_data_line_cmd(cmd))
- sdhci_set_timeout(host, cmd);
-
- if (!data)
- return;
+ if (host->ops->set_timeout)
+ host->ops->set_timeout(host, cmd);
+ else
+ __sdhci_set_timeout(host, cmd);
+}
+static void sdhci_initialize_data(struct sdhci_host *host,
+ struct mmc_data *data)
+{
WARN_ON(host->data);
/* Sanity checks */
@@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->data = data;
host->data_early = 0;
host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host,
+ SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+ /*
+ * From version 4.10 onwards, if v4 mode is enabled, the 32-bit Block
+ * Count register can be used; in that case the 16-bit block count
+ * register must be 0.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+ } else {
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
@@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_irqs(host);
- /* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_block_info(host, data);
+}
- /*
- * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
- * can be supported, in that case 16-bit block count register must be 0.
- */
- if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
- (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
- if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
- sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
- sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
+
+ host->tx_chan = dma_request_chan(mmc->parent, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ ret = PTR_ERR(host->tx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request TX DMA channel.\n");
+ host->tx_chan = NULL;
+ return ret;
+ }
+
+ host->rx_chan = dma_request_chan(mmc->parent, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ ret = PTR_ERR(host->rx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request RX DMA channel.\n");
+ host->rx_chan = NULL;
+ }
+
+ return ret;
+}
+
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ int ret, i;
+ enum dma_transfer_direction dir;
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = cmd->data;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ dma_cookie_t cookie;
+ int sg_cnt;
+
+ if (!host->mapbase)
+ return -EINVAL;
+
+ cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ /* Sanity check: all the SG entries must be aligned by block size. */
+ for (i = 0; i < data->sg_len; i++) {
+ if ((data->sg + i)->length % data->blksz)
+ return -EINVAL;
+ }
+
+ chan = sdhci_external_dma_channel(host, data);
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ret;
+
+ sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+ if (sg_cnt <= 0)
+ return -EINVAL;
+
+ dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = NULL;
+ desc->callback_param = NULL;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ ret = cookie;
+
+ return ret;
+}
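/*
 * Worked numbers for the slave config above, assuming the common
 * 512-byte block size: the 32-bit data port at
 * host->mapbase + SDHCI_BUFFER is driven with an address width of
 * 4 bytes and src/dst_maxburst = 512 / 4 = 128 words per burst, and
 * the sanity loop rejects any scatterlist entry whose length is not a
 * multiple of 512.
 */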
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ if (host->rx_chan) {
+ dma_release_channel(host->rx_chan);
+ host->rx_chan = NULL;
+ }
+
+ sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ host->flags |= SDHCI_REQ_USE_DMA;
+ sdhci_set_transfer_irqs(host);
+
+ sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (!sdhci_external_dma_setup(host, cmd)) {
+ __sdhci_external_dma_prepare_data(host, cmd);
} else {
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ sdhci_external_dma_release(host);
+ pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
+ mmc_hostname(host->mmc));
+ sdhci_prepare_data(host, cmd);
}
}
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct dma_chan *chan;
+
+ if (!cmd->data)
+ return;
+
+ chan = sdhci_external_dma_channel(host, cmd->data);
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ /* This should never happen */
+ WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return NULL;
+}
+
+#endif
+
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+ host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
+
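/*
 * A minimal sketch of how a platform driver opts in, mirroring the
 * sdhci-omap hunk earlier in this patch ("regs", "offset" and "dev"
 * are assumptions borrowed from that probe path):
 */
	host->mapbase = regs->start + offset;	/* slave addresses need the phys base */
	if (of_find_property(dev->of_node, "dmas", NULL))
		sdhci_switch_external_dma(host, true);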
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
{
@@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
int i;
- if (host->cmd && host->cmd->mrq == mrq)
- host->cmd = NULL;
-
- if (host->data_cmd && host->data_cmd->mrq == mrq)
- host->data_cmd = NULL;
-
- if (host->data && host->data->mrq == mrq)
- host->data = NULL;
-
- if (sdhci_needs_reset(host, mrq))
- host->pending_reset = true;
-
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (host->mrqs_done[i] == mrq) {
WARN_ON(1);
@@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
}
WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
+
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ sdhci_set_mrq_done(host, mrq);
sdhci_del_timer(host, mrq);
@@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host)
/*
* Need to send CMD12 if -
- * a) open-ended multiblock transfer (no CMD23)
+ * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
* b) error in multiblock transfer
*/
if (data->stop &&
- (data->error ||
- !data->mrq->sbc)) {
+ ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+ data->error)) {
/*
* 'cap_cmd_during_tfr' request must not use the command line
* after mmc_command_done() has been called. It is upper layer's
@@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
}
host->cmd = cmd;
+ host->data_timeout = 0;
if (sdhci_data_line_cmd(cmd)) {
WARN_ON(host->data_cmd);
host->data_cmd = cmd;
+ sdhci_set_timeout(host, cmd);
}
- sdhci_prepare_data(host, cmd);
+ if (cmd->data) {
+ if (host->use_external_dma)
+ sdhci_external_dma_prepare_data(host, cmd);
+ else
+ sdhci_prepare_data(host, cmd);
+ }
sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
@@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += 10 * HZ;
sdhci_mod_timer(host, cmd->mrq, timeout);
+ if (host->use_external_dma)
+ sdhci_external_dma_pre_transfer(host, cmd);
+
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_led_activate(host);
- /*
- * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
- * requests if Auto-CMD12 is enabled.
- */
- if (sdhci_auto_cmd12(host, mrq)) {
- if (mrq->stop) {
- mrq->data->stop = NULL;
- mrq->stop = NULL;
- }
- }
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
mrq->cmd->error = -ENOMEDIUM;
sdhci_finish_mrq(host, mrq);
@@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (host->flags & SDHCI_REQ_USE_DMA) {
struct mmc_data *data = mrq->data;
+ if (host->use_external_dma && data &&
+ (mrq->cmd->error || data->error)) {
+ struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
+ host->mrqs_done[i] = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ dmaengine_terminate_sync(chan);
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_set_mrq_done(host, mrq);
+ }
+
if (data && data->host_cookie == COOKIE_MAPPED) {
if (host->bounce_buffer) {
/*
@@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host)
if (sdhci_can_64bit_dma(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
+ if (host->use_external_dma) {
+ ret = sdhci_external_dma_init(host);
+ if (ret == -EPROBE_DEFER)
+ goto unreg;
+ /*
+ * Fall back to the DMA/PIO integrated in standard SDHCI
+ * instead of external DMA devices.
+ */
+ else if (ret)
+ sdhci_switch_external_dma(host, false);
+ /* Disable internal DMA sources */
+ else
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+ }
+
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->set_dma_mask)
ret = host->ops->set_dma_mask(host);
@@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host)
dma_addr_t dma;
void *buf;
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_64_DESC_SZ(host);
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
- } else {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_32_DESC_SZ;
- host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- }
+ if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+ host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ else if (!host->alloc_desc_sz)
+ host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+ host->desc_sz = host->alloc_desc_sz;
+ host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
/*
@@ -3913,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
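
(A worked example for the f_min change above, with illustrative numbers and assuming SDHCI_MAX_DIV_SPEC_300 = 2046 as defined in sdhci.h: with max_clk = 200 MHz and clk_mul = 4, Programmable Clock Mode bottoms out at 200 MHz * 4 / 1024, about 781 kHz, while Divided Clock Mode reaches 200 MHz / 2046, about 98 kHz, so the divided-mode figure is always the lower, correct bound for mmc->f_min.)

#include <linux/minmax.h>

/* Sketch only, not driver code: the v3.00 minimum-rate comparison.
 * 1024 is the largest programmable-mode divisor and 2046 the largest
 * divided-mode divisor (SDHCI_MAX_DIV_SPEC_300); assumes clk_mul >= 1.
 */
static unsigned int spec300_f_min(unsigned int max_clk, unsigned int clk_mul)
{
	unsigned int divided_min = max_clk / 2046;
	unsigned int prog_min = (max_clk * clk_mul) / 1024;

	/* 1024 < 2046, so divided_min < prog_min for any clk_mul >= 1 */
	return min(divided_min, prog_min);
}
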
@@ -4276,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host)
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
host->align_addr);
+
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
@@ -4321,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host)
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ host->use_external_dma ? "External DMA" :
(host->flags & SDHCI_USE_ADMA) ?
(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4409,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->adma_table_sz, host->align_buffer,
host->align_addr);
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index fe83ece6965b..a6a3ddcf97e7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -487,6 +487,7 @@ struct sdhci_host {
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
+ phys_addr_t mapbase; /* physical address base */
char *bounce_buffer; /* For packing SDMA reads/writes */
dma_addr_t bounce_addr;
unsigned int bounce_buffer_size;
@@ -535,6 +536,7 @@ struct sdhci_host {
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
+ bool use_external_dma; /* Host selects to use external DMA */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -556,7 +558,8 @@ struct sdhci_host {
dma_addr_t adma_addr; /* Mapped ADMA descr. table */
dma_addr_t align_addr; /* Mapped bounce buffer */
- unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int desc_sz; /* ADMA current descriptor size */
+ unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */
struct workqueue_struct *complete_wq; /* Request completion wq */
struct work_struct complete_work; /* Request completion work */
@@ -564,6 +567,11 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+#endif
+
u32 caps; /* CAPABILITY_0 */
u32 caps1; /* CAPABILITY_1 */
bool read_caps; /* Capability flags have been read */
@@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host);
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
#endif /* __SDHCI_HW_H */
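
(The rx_chan/tx_chan fields and the sdhci_switch_external_dma() declaration above are the only external-DMA surface visible in this header. As a hedged sketch, and purely an assumption for illustration since the real sdhci_external_dma_init() body is not part of these hunks, the init side plausibly requests the DT "tx"/"rx" dmaengine channels much like other drivers converted in this series do:)

#include <linux/dmaengine.h>
#include <linux/err.h>
#include "sdhci.h"	/* struct sdhci_host; fields exist under CONFIG_MMC_SDHCI_EXTERNAL_DMA */

/* Hypothetical helper, not the real implementation. -EPROBE_DEFER must
 * reach the caller so sdhci_setup_host() can retry probing (see the
 * "goto unreg" hunk above).
 */
static int example_external_dma_init(struct sdhci_host *host)
{
	struct device *dev = mmc_dev(host->mmc);
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);		/* may be -EPROBE_DEFER */
	host->tx_chan = chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
		return PTR_ERR(chan);
	}
	host->rx_chan = chan;

	return 0;
}
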
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b8e897e31e2e..3afea580fbea 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
writeb(val, host->ioaddr + reg);
}
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = sdhci_execute_tuning(mmc, opcode);
+
+ if (err)
+ return err;
+ /*
+ * Tuning data remains in the buffer after tuning.
+ * Do a command and data reset to get rid of it.
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
- int cmd_error = 0;
- int data_error = 0;
-
- if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
- return intmask;
-
- cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
- return 0;
-}
-
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
struct sdhci_am654_data *sdhci_am654;
const struct of_device_id *match;
struct sdhci_host *host;
- struct resource *res;
struct clk *clk_xin;
struct device *dev = &pdev->dev;
void __iomem *base;
@@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto pm_runtime_put;
@@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_put;
}
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
ret = sdhci_am654_init(host);
if (ret)
goto pm_runtime_put;
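
(This is the first of several identical conversions in this series; sdhci_f_sdh30, sh_mmcif, sunxi-mmc and tmio follow. devm_platform_ioremap_resource() simply folds the two deleted calls into one; roughly, as a simplified sketch:)

#include <linux/io.h>
#include <linux/platform_device.h>

/* Approximately what devm_platform_ioremap_resource(pdev, index) does.
 * devm_ioremap_resource() already turns a NULL resource into
 * ERR_PTR(-EINVAL), so no separate check is needed.
 */
static void __iomem *ioremap_nth_mem(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, index);

	return devm_ioremap_resource(&pdev->dev, res);
}
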
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index fa0dfc657c22..4625cc071b61 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
u32 reg = 0;
@@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 98c575de43c7..7e1fd557109c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
host->chan_rx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_rx);
} else {
- host->chan_tx = dma_request_slave_channel(dev, "tx");
- host->chan_rx = dma_request_slave_channel(dev, "rx");
+ host->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->chan_tx))
+ host->chan_tx = NULL;
+ host->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->chan_rx))
+ host->chan_rx = NULL;
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
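
(One behavioural note on this conversion, which also applies to usdhi6rol0 below: dma_request_slave_channel() returned NULL on any failure, whereas dma_request_chan() returns an ERR_PTR(), including -EPROBE_DEFER. Drivers that treat DMA as strictly optional map errors back to NULL, as above; a caller that wanted to honour probe deferral would look roughly like this sketch, which is not part of the patch:)

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Optional-DMA request that still reports probe deferral to the caller. */
static struct dma_chan *request_optional_chan(struct device *dev,
					      const char *name, int *ret)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan)) {
		*ret = PTR_ERR(chan);
		if (*ret != -EPROBE_DEFER)
			*ret = 0;	/* no channel: fall back to PIO */
		return NULL;
	}

	*ret = 0;
	return chan;
}
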
@@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_host *host;
struct device *dev = &pdev->dev;
struct sh_mmcif_plat_data *pd = dev->platform_data;
- struct resource *res;
void __iomem *reg;
const char *name;
@@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
if (irq[0] < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d577a6b0ceae..f87d7967457f 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
if (ret)
return ret;
- host->reg_base = devm_ioremap_resource(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->reg_base))
return PTR_ERR(host->reg_base);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index c4a1d49fbea4..1e424bcdbd5f 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
- struct resource *res;
void __iomem *ctl;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = devm_ioremap_resource(&pdev->dev, res);
+ ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctl))
return ERR_CAST(ctl);
@@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
* Look for a card detect GPIO; if it fails with anything
* other than a probe deferral, just live without it.
*/
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 0c72ec5546c3..a1683c49cb90 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -59,7 +59,6 @@
struct uniphier_sd_priv {
struct tmio_mmc_data tmio_data;
struct pinctrl *pinctrl;
- struct pinctrl_state *pinstate_default;
struct pinctrl_state *pinstate_uhs;
struct clk *clk;
struct reset_control *rst;
@@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
- struct pinctrl_state *pinstate;
+ struct pinctrl_state *pinstate = NULL;
u32 val, tmp;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val = UNIPHIER_SD_VOLT_330;
- pinstate = priv->pinstate_default;
break;
case MMC_SIGNAL_VOLTAGE_180:
val = UNIPHIER_SD_VOLT_180;
@@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
- pinctrl_select_state(priv->pinctrl, pinstate);
+ if (pinstate)
+ pinctrl_select_state(priv->pinctrl, pinstate);
+ else
+ pinctrl_select_default_state(mmc_dev(mmc));
return 0;
}
@@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
if (IS_ERR(priv->pinctrl))
return PTR_ERR(priv->pinctrl);
- priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(priv->pinstate_default))
- return PTR_ERR(priv->pinstate_default);
-
priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
if (IS_ERR(priv->pinstate_uhs))
return PTR_ERR(priv->pinstate_uhs);
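
(Both this driver and usdhi6rol0 below drop their cached PINCTRL_STATE_DEFAULT handle in favour of the new pinctrl_select_default_state() helper, which looks the default state up on the device itself. The resulting voltage-switch shape, reduced to a sketch:)

#include <linux/pinctrl/consumer.h>

/* Only the non-default (UHS) state needs an explicit handle now. */
static int set_signalling_pins(struct device *dev, struct pinctrl *p,
			       struct pinctrl_state *uhs_state, bool uhs)
{
	if (uhs)
		return pinctrl_select_state(p, uhs_state);

	return pinctrl_select_default_state(dev);
}
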
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b11ac2314328..9a0b1e4e405d 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -199,7 +199,6 @@ struct usdhi6_host {
/* Pin control */
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
};
@@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
};
int ret;
- host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
host->chan_tx);
- if (!host->chan_tx)
+ if (IS_ERR(host->chan_tx)) {
+ host->chan_tx = NULL;
return;
+ }
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = start + USDHI6_SD_BUF0;
@@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
if (ret < 0)
goto e_release_tx;
- host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
host->chan_rx);
- if (!host->chan_rx)
+ if (IS_ERR(host->chan_rx)) {
+ host->chan_rx = NULL;
goto e_release_tx;
+ }
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr;
@@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
host->pins_uhs);
default:
- return pinctrl_select_state(host->pinctrl,
- host->pins_default);
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
}
@@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev)
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
- if (!IS_ERR(host->pins_uhs)) {
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- if (IS_ERR(host->pins_default)) {
- dev_err(dev,
- "UHS pinctrl requires a default pin state.\n");
- ret = PTR_ERR(host->pins_default);
- goto e_free_mmc;
- }
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index f4ac064ff471..e48bddd95ce6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
len = pci_resource_len(pcidev, 0);
base = pci_resource_start(pcidev, 0);
- sdhost->mmiobase = ioremap_nocache(base, len);
+ sdhost->mmiobase = ioremap(base, len);
if (!sdhost->mmiobase) {
ret = -ENOMEM;
goto free_mmc_host;
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index eccf2e5d905e..3af50db8b21b 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
* ChipCommon revision.
*/
if (b47s->bcma_cc->core->id.rev == 54)
- b47s->window = ioremap_nocache(res->start, resource_size(res));
+ b47s->window = ioremap(res->start, resource_size(res));
else
b47s->window = ioremap_cache(res->start, resource_size(res));
if (!b47s->window) {
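
(The long run of ioremap_nocache() to ioremap() hunks that follows is mechanical: ioremap() has always produced an uncached mapping on every architecture, so the _nocache alias is redundant and is being removed treewide. Explicitly cacheable mappings keep using ioremap_cache(), as the branch above shows. The equivalence, as a one-line sketch:)

#include <linux/io.h>

/* ioremap() is the uncached default; the _nocache spelling added nothing. */
static void __iomem *map_mmio(phys_addr_t phys, size_t len)
{
	return ioremap(phys, len);	/* formerly: ioremap_nocache(phys, len) */
}
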
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 462fadb56bdb..42a95ba40f2c 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index c9b7b4d5a923..460494212f6a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 5c27c6994896..85e14150a073 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6b989f391baa..fda72c5fd8f9 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 69503aef981e..d67b845b0e89 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
- p->csr_base = ioremap_nocache(csr_phys, csr_len);
+ p->csr_base = ioremap(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
@@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
- p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+ p->map.virt = ioremap(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 0eeadfeb620d..832b880d1aaf 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -78,7 +78,7 @@ static int __init init_l440gx(void)
return -ENODEV;
}
- l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+ l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index abc52b70bb00..0bb651624f05 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -82,10 +82,10 @@ static int __init init_netsc520(void)
printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
(unsigned long long)netsc520_map.size,
(unsigned long long)netsc520_map.phys);
- netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+ netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
if (!netsc520_map.virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
return -EIO;
}
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 50046d497398..7d349874ffeb 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -176,7 +176,7 @@ static int __init nettel_init(void)
#endif
int rc = 0;
- nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+ nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
if (nettel_mmcrp == NULL) {
printk("SNAPGEAR: failed to disable MMCR cache??\n");
return(-EIO);
@@ -217,7 +217,7 @@ static int __init nettel_init(void)
__asm__ ("wbinvd");
nettel_amd_map.phys = amdaddr;
- nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+ nettel_amd_map.virt = ioremap(amdaddr, maxsize);
if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
iounmap(nettel_mmcrp);
@@ -303,7 +303,7 @@ static int __init nettel_init(void)
/* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
nettel_intel_map.phys = intel0addr;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
rc = -EIO;
@@ -337,7 +337,7 @@ static int __init nettel_init(void)
iounmap(nettel_intel_map.virt);
nettel_intel_map.size = maxsize;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
rc = -EIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 9a49f8a06fb8..377ef0fc4e3e 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.write = mtd_pci_write8,
map->map.size = 0x00800000;
- map->base = ioremap_nocache(pci_resource_start(dev, 0),
+ map->base = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!map->base)
@@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.read = mtd_pci_read32,
map->map.write = mtd_pci_write32,
map->map.size = len;
- map->base = ioremap_nocache(base, len);
+ map->base = ioremap(base, len);
if (!map->base)
return -ENOMEM;
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 03af2df90d47..9902b37e18b4 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void)
int i, j;
/* map in SC520's MMCR area */
- mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
- if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+ mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
/* force physical address fields to BIOS defaults: */
for(i = 0; i < NUM_FLASH_BANKS; i++)
sc520cdp_map[i].phys = par_table[i].default_address;
@@ -225,10 +225,10 @@ static int __init init_sc520cdp(void)
(unsigned long long)sc520cdp_map[i].size,
(unsigned long long)sc520cdp_map[i].phys);
- sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+ sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
if (!sc520cdp_map[i].virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
for (j = 0; j < i; j++) {
if (mymtd[j]) {
map_destroy(mymtd[j]);
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 2afb253bf456..57303f904bc1 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev,
}
/* remap the IO window (w/o caching) */
- scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+ scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
if (!scb2_ioaddr) {
printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
if (!region_fail)
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 6cfc8783c0e5..70d6e865f555 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -56,10 +56,10 @@ static int __init init_ts5500_map(void)
{
int rc = 0;
- ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+ ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
if (!ts5500_map.virt) {
- printk(KERN_ERR "Failed to ioremap_nocache\n");
+ printk(KERN_ERR "Failed to ioremap\n");
rc = -EIO;
goto err2;
}
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index e10b76089048..75eb3e97fae3 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev)
goto out1;
}
- ctx->base = ioremap_nocache(r->start, 0x1000);
+ ctx->base = ioremap(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index d62aa5271753..2f77ee55e1bf 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
- denali->reg = ioremap_nocache(csr_base, csr_len);
+ denali->reg = ioremap(csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->host = ioremap_nocache(mem_base, mem_len);
+ denali->host = ioremap(mem_base, mem_len);
if (!denali->host) {
- dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+ dev_err(&dev->dev, "Spectra: ioremap failed!");
ret = -ENOMEM;
goto out_unmap_reg;
}
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 1054cc070747..f31fae3a4c68 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev)
fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
FSL_UPM_WAIT_WRITE_BYTE;
- fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+ fun->io_base = devm_ioremap(&ofdev->dev, io_res.start,
resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 334fe3130285..b9d5d55a5edb 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0)
+ return ret;
+
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this)
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- return 0;
err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
return ret;
}
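
(gpmi_init() now brackets its register accesses with a runtime-PM reference so the block is guaranteed to be powered. A generic sketch of that bracket follows; note the sketch adds pm_runtime_put_noidle() on the failed-get path, which is the usual convention, while the hunk above returns directly:)

#include <linux/pm_runtime.h>

/* Run @body with the device runtime-resumed, then allow autosuspend. */
static int with_runtime_pm(struct device *dev, int (*body)(struct device *))
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	ret = body(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}
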
@@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev)
return ret;
}
+ /* Set flag to get timing setup restored for next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
/* re-init the BCH registers */
ret = bch_set_geometry(this);
if (ret) {
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index aeb3ad2dbfb8..b0cd443dd758 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
@@ -4769,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
{
- struct mtd_info *mtd = &nor->mtd;
-
- if (mtd->size <= SZ_16M)
+ if (nor->params.size <= SZ_16M)
return;
nor->flags |= SNOR_F_4B_OPCODES;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d02f12a5254e..25a8f9387d5a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -71,6 +71,49 @@ config DUMMY
To compile this driver as a module, choose M here: the module
will be called dummy.
+config WIREGUARD
+ tristate "WireGuard secure network tunnel"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select DST_CACHE
+ select CRYPTO
+ select CRYPTO_LIB_CURVE25519
+ select CRYPTO_LIB_CHACHA20POLY1305
+ select CRYPTO_LIB_BLAKE2S
+ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
+ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
+ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT
+ select ARM_CRYPTO if ARM
+ select ARM64_CRYPTO if ARM64
+ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_ARM if ARM
+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
+ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
+ help
+ WireGuard is a secure, fast, and easy to use replacement for IPSec
+ that uses modern cryptography and clever networking tricks. It's
+ designed to be fairly general purpose and abstract enough to fit most
+ use cases, while at the same time remaining extremely simple to
+ configure. See www.wireguard.com for more info.
+
+ It's safe to say Y or M here, as the driver is very lightweight and
+ is only in use when an administrator chooses to add an interface.
+
+config WIREGUARD_DEBUG
+ bool "Debugging checks and verbose messages"
+ depends on WIREGUARD
+ help
+ This will write log messages for handshake and other events
+ that occur for a WireGuard interface. It will also perform some
+ extra validation checks and unit tests at various points. This is
+ only useful for debugging.
+
+ Say N here unless you know what you're doing.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -489,12 +532,12 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config THUNDERBOLT_NET
- tristate "Networking over Thunderbolt cable"
- depends on THUNDERBOLT && INET
+config USB4_NET
+ tristate "Networking over USB4 and Thunderbolt cables"
+ depends on USB4 && INET
help
- Select this if you want to create network between two
- computers over a Thunderbolt cable. The driver supports Apple
+ Select this if you want to create a network between two computers
+ over USB4 and Thunderbolt cables. The driver supports Apple
ThunderboltIP protocol and allows communication with any host
supporting the same protocol including Windows and macOS.
@@ -506,6 +549,8 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ depends on INET
+ depends on IPV6 || IPV6=n
select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0d3ba056cda3..71b88ffc5587 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_WIREGUARD) += wireguard/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
@@ -76,6 +77,6 @@ obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index b3c63d2f16aa..18428e104445 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -189,7 +189,7 @@ static int cops_nodeid (struct net_device *dev, int nodeid);
static irqreturn_t cops_interrupt (int irq, void *dev_id);
static void cops_poll(struct timer_list *t);
-static void cops_timeout(struct net_device *dev);
+static void cops_timeout(struct net_device *dev, unsigned int txqueue);
static void cops_rx (struct net_device *dev);
static netdev_tx_t cops_send_packet (struct sk_buff *skb,
struct net_device *dev);
@@ -844,7 +844,7 @@ static void cops_rx(struct net_device *dev)
netif_rx(skb);
}
-static void cops_timeout(struct net_device *dev)
+static void cops_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b0f5bc07aef5..22a49c6d7ae6 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -356,7 +356,7 @@ int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
-void arcnet_timeout(struct net_device *dev);
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
/* I/O equivalents */
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 553776cc1d29..e04efc0a5c97 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -763,7 +763,7 @@ static int go_tx(struct net_device *dev)
}
/* Called by the kernel when transmit times out */
-void arcnet_timeout(struct net_device *dev)
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
struct arcnet_local *lp = netdev_priv(dev);
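
(The cops and arcnet signature changes here are part of a treewide update: ndo_tx_timeout now receives the index of the queue that actually hung, so multi-queue drivers no longer have to guess. A minimal handler under the new signature, as a sketch with illustrative names:)

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "TX timeout on queue %u\n", txqueue);

	/* Single-queue drivers like cops/arcnet can ignore txqueue; a
	 * multi-queue driver would restart just the stuck ring.
	 */
	netif_tx_wake_queue(netdev_get_tx_queue(dev, txqueue));
}
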
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index e3b25f310936..31e43a2197a3 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -31,16 +31,6 @@
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
-/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
-#define AD_STATE_LACP_ACTIVITY 0x1
-#define AD_STATE_LACP_TIMEOUT 0x2
-#define AD_STATE_AGGREGATION 0x4
-#define AD_STATE_SYNCHRONIZATION 0x8
-#define AD_STATE_COLLECTING 0x10
-#define AD_STATE_DISTRIBUTING 0x20
-#define AD_STATE_DEFAULTED 0x40
-#define AD_STATE_EXPIRED 0x80
-
/* Port Variables definitions used by the State Machines (43.4.7 in the
* 802.3ad standard)
*/
@@ -457,8 +447,8 @@ static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) == (port->actor_oper_port_state & LACP_STATE_AGGREGATION))) ||
+ ((lacpdu->actor_state & LACP_STATE_AGGREGATION) == 0)
) {
port->sm_vars |= AD_PORT_MATCHED;
} else {
@@ -492,18 +482,18 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
partner->port_state = lacpdu->actor_state;
/* set actor_oper_port_state.defaulted to FALSE */
- port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
+ port->actor_oper_port_state &= ~LACP_STATE_DEFAULTED;
/* set the partner sync. to on if the partner is sync,
* and the port is matched
*/
if ((port->sm_vars & AD_PORT_MATCHED) &&
- (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
- partner->port_state |= AD_STATE_SYNCHRONIZATION;
+ (lacpdu->actor_state & LACP_STATE_SYNCHRONIZATION)) {
+ partner->port_state |= LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=1\n");
} else {
- partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
+ partner->port_state &= ~LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=0\n");
}
@@ -526,7 +516,7 @@ static void __record_default(struct port *port)
sizeof(struct port_params));
/* set actor_oper_port_state.defaulted to true */
- port->actor_oper_port_state |= AD_STATE_DEFAULTED;
+ port->actor_oper_port_state |= LACP_STATE_DEFAULTED;
}
}
@@ -556,7 +546,7 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
- (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
+ (lacpdu->actor_state & LACP_STATE_AGGREGATION) != (partner->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -588,8 +578,8 @@ static void __update_default_selected(struct port *port)
!MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
- (admin->port_state & AD_STATE_AGGREGATION)
- != (oper->port_state & AD_STATE_AGGREGATION)) {
+ (admin->port_state & LACP_STATE_AGGREGATION)
+ != (oper->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -619,10 +609,10 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
- ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
- ((lacpdu->partner_state & AD_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)) ||
- ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
+ ((lacpdu->partner_state & LACP_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY)) ||
+ ((lacpdu->partner_state & LACP_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)) ||
+ ((lacpdu->partner_state & LACP_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) ||
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) != (port->actor_oper_port_state & LACP_STATE_AGGREGATION))
) {
port->ntt = true;
}
@@ -978,7 +968,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
* enable port will take place only after this timer)
*/
if ((port->sm_vars & AD_PORT_SELECTED) &&
- (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
if (port->aggregator->is_active)
port->sm_mux_state =
@@ -996,14 +986,14 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state = AD_MUX_DETACHED;
} else if (port->aggregator->is_active) {
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
- !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
@@ -1032,11 +1022,11 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port,
update_slave_arr);
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
port->ntt = true;
break;
case AD_MUX_WAITING:
@@ -1045,20 +1035,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
case AD_MUX_ATTACHED:
if (port->aggregator->is_active)
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
else
port->actor_oper_port_state &=
- ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
- port->actor_oper_port_state |= AD_STATE_COLLECTING;
- port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
@@ -1156,7 +1146,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_vars |= AD_PORT_LACP_ENABLED;
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* Fall Through */
@@ -1166,9 +1156,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
case AD_RX_LACP_DISABLED:
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->partner_oper.port_state &= ~AD_STATE_AGGREGATION;
+ port->partner_oper.port_state &= ~LACP_STATE_AGGREGATION;
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
/* Reset of the Synchronization flag (Standard 43.4.12)
@@ -1177,19 +1167,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
- port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+ port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
- port->actor_oper_port_state |= AD_STATE_EXPIRED;
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
break;
case AD_RX_DEFAULTED:
__update_default_selected(port);
__record_default(port);
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
/* detect loopback situation */
@@ -1202,8 +1192,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__update_selected(lacpdu, port);
__update_ntt(lacpdu, port);
__record_pdu(lacpdu, port);
- port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT));
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
default:
break;
@@ -1231,7 +1221,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_actor_timer_counter &&
!(--port->sm_churn_actor_timer_counter) &&
port->sm_churn_actor_state == AD_CHURN_MONITOR) {
- if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_actor_state = AD_NO_CHURN;
} else {
port->churn_actor_count++;
@@ -1241,7 +1231,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_partner_timer_counter &&
!(--port->sm_churn_partner_timer_counter) &&
port->sm_churn_partner_state == AD_CHURN_MONITOR) {
- if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_partner_state = AD_NO_CHURN;
} else {
port->churn_partner_count++;
@@ -1298,7 +1288,7 @@ static void ad_periodic_machine(struct port *port)
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))
) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
@@ -1315,11 +1305,11 @@ static void ad_periodic_machine(struct port *port)
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
- & AD_STATE_LACP_TIMEOUT))
+ & LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
- if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
+ if ((port->partner_oper.port_state & LACP_STATE_LACP_TIMEOUT)) {
port->sm_periodic_timer_counter = 0;
port->sm_periodic_state = AD_PERIODIC_TX;
}
@@ -1335,7 +1325,7 @@ static void ad_periodic_machine(struct port *port)
break;
case AD_PERIODIC_TX:
if (!(port->partner_oper.port_state &
- AD_STATE_LACP_TIMEOUT))
+ LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
else
port->sm_periodic_state = AD_FAST_PERIODIC;
@@ -1542,7 +1532,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
ad_agg_selection_logic(aggregator, update_slave_arr);
if (!port->aggregator->is_active)
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator that
@@ -1848,13 +1838,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
@@ -2105,10 +2095,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
@@ -2695,9 +2685,9 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
else
- port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT;
}
spin_unlock_bh(&bond->mode_lock);
}
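
(The AD_STATE_* block removed at the top of this file is not lost: the same 802.3ad (43.4.2.2) port-state bits reappear under LACP_STATE_* names in a shared header, whose location is assumed here since it is outside this diff, so other LACP-aware code can reference them. The bit layout is unchanged:)

/* Port state bits, carried over verbatim from the removed AD_STATE_* defines */
#define LACP_STATE_LACP_ACTIVITY   0x1
#define LACP_STATE_LACP_TIMEOUT    0x2
#define LACP_STATE_AGGREGATION     0x4
#define LACP_STATE_SYNCHRONIZATION 0x8
#define LACP_STATE_COLLECTING      0x10
#define LACP_STATE_DISTRIBUTING    0x20
#define LACP_STATE_DEFAULTED       0x40
#define LACP_STATE_EXPIRED         0x80
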
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index bd40b114d6cd..d737ceb61203 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -270,7 +270,9 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- BUG_ON(dev == NULL);
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index c8e1a04ba384..9df2007b5e56 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_put;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index b9047d8110d5..194c86e0f340 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 1c4d32d1a542..d513fac50718 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index ff5a96f34085..d7222ba46622 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev)
resource_size(res_mem), DRV_NAME))
return -EBUSY;
- addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ addr = devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
if (!addr)
return -ENOMEM;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 2e57122f02fb..2f5c287eac95 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
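
(The slcan change closes a use-after-free window: slcan_write_wakeup() can race with slcan_close() clearing tty->disc_data. The writer publishes NULL with rcu_assign_pointer(), waits in synchronize_rcu() so every in-flight reader has either seen NULL or finished, and only then flushes the work. The reader half of the pattern, reduced to a sketch:)

#include <linux/rcupdate.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct slcan {			/* minimal stand-in for the driver's type */
	struct work_struct tx_work;
};

/* Reader side, as in slcan_write_wakeup() above. */
static void wakeup_reader(struct tty_struct *tty)
{
	struct slcan *sl;

	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}
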
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 8242fb287cbb..d1ddf763b188 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
goto platform_resource_failed;
card->dpram_phys = pres->start;
card->dpram_size = resource_size(pres);
- card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ card->dpram = ioremap(card->dpram_phys, card->dpram_size);
if (!card->dpram) {
dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
goto ioremap_failed;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c7667645f04a..2d38dbc9dd8c 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -54,6 +54,8 @@ source "drivers/net/dsa/mv88e6xxx/Kconfig"
source "drivers/net/dsa/ocelot/Kconfig"
+source "drivers/net/dsa/qca/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
@@ -103,7 +105,6 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- depends on OF
depends on NET_DSA
select FIXED_PHY
select VITESSE_PHY
@@ -114,7 +115,6 @@ config NET_DSA_VITESSE_VSC73XX
config NET_DSA_VITESSE_VSC73XX_SPI
tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
- depends on OF
depends on NET_DSA
depends on SPI
select NET_DSA_VITESSE_VSC73XX
@@ -124,7 +124,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI
config NET_DSA_VITESSE_VSC73XX_PLATFORM
tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
- depends on OF
depends on NET_DSA
depends on HAS_IOMEM
select NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 9d384a32b3a2..4a943ccc2ca4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -21,4 +21,5 @@ obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
+obj-y += qca/
obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index edacacfc9365..060497512159 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -371,8 +371,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
- mgmt &= ~SM_SW_FWD_MODE;
-
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
@@ -573,9 +571,8 @@ EXPORT_SYMBOL(b53_disable_port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
- bool tag_en = !(ds->ops->get_tag_protocol(ds, port) ==
- DSA_TAG_PROTO_NONE);
struct b53_device *dev = ds->priv;
+ bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
u8 hdr_ctl, val;
u16 reg;
@@ -595,6 +592,22 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
break;
}
+ /* Enable management mode if tagging is requested */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= SM_SW_FWD_MODE;
+ else
+ hdr_ctl &= ~SM_SW_FWD_MODE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
+
+ /* Configure the appropriate IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
+ if (port == 8)
+ hdr_ctl |= GC_FRM_MGMT_PORT_MII;
+ else if (port == 5)
+ hdr_ctl |= GC_FRM_MGMT_PORT_M;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -1866,36 +1879,57 @@ static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
return false;
}
-static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol tag_protocol)
{
bool ret = b53_possible_cpu_port(ds, port);
- if (!ret)
+ if (!ret) {
dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
port);
+ return ret;
+ }
+
+ switch (tag_protocol) {
+ case DSA_TAG_PROTO_BRCM:
+ case DSA_TAG_PROTO_BRCM_PREPEND:
+ dev_warn(ds->dev,
+ "Port %d is stacked to Broadcom tag switch\n", port);
+ ret = false;
+ break;
+ default:
+ ret = true;
+ break;
+ }
+
return ret;
}
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot)
{
struct b53_device *dev = ds->priv;
/* Older models (5325, 5365) support a different tag format that we do
- * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
- * mode to be turned on which means we need to specifically manage ARL
- * misses on multicast addresses (TBD).
+ * not support in net/dsa/tag_brcm.c yet.
*/
- if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
- !b53_can_enable_brcm_tags(ds, port))
- return DSA_TAG_PROTO_NONE;
+ if (is5325(dev) || is5365(dev) ||
+ !b53_can_enable_brcm_tags(ds, port, mprot)) {
+ dev->tag_protocol = DSA_TAG_PROTO_NONE;
+ goto out;
+ }
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
* which requires us to use the prepended Broadcom tag type
*/
- if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT)
- return DSA_TAG_PROTO_BRCM_PREPEND;
+ if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
+ goto out;
+ }
- return DSA_TAG_PROTO_BRCM;
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM;
+out:
+ return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);
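
(The reworked b53_get_tag_protocol() now receives the tag protocol already active on the DSA master, mprot, and refuses Broadcom tagging when the switch is stacked behind another Broadcom-tag device, since two taggers cannot coexist on the same master; the decision is cached in dev->tag_protocol so b53_brcm_hdr_setup() can consult it without going back through the ops. The stacking test, isolated as a sketch:)

#include <net/dsa.h>

/* True when the master already carries a Broadcom tag, in which case this
 * switch must fall back to DSA_TAG_PROTO_NONE.
 */
static bool master_uses_brcm_tags(enum dsa_tag_protocol mprot)
{
	return mprot == DSA_TAG_PROTO_BRCM ||
	       mprot == DSA_TAG_PROTO_BRCM_PREPEND;
}
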
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1877acf05081..3c30f3a7eb29 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -118,6 +118,7 @@ struct b53_device {
u8 jumbo_size_reg;
int reset_gpio;
u8 num_arl_entries;
+ enum dsa_tag_protocol tag_protocol;
/* used ports mask */
u16 enabled_ports;
@@ -359,7 +360,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e43040c9f9ee..3e8635311d0d 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
+ reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index c8d7ef27fd72..fdcb70b9f0e4 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -61,7 +61,8 @@ struct dsa_loop_priv {
static struct phy_device *phydevs[PHY_MAX_ADDR];
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e3c333a8f45d..cc17a44dd3a8 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -883,7 +883,8 @@ static int lan9303_check_device(struct lan9303 *chip)
/* ---------------------------- DSA -----------------------------------*/
static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_LAN9303;
}
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 955324968b74..0369c22fe3e1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -841,7 +841,8 @@ static int gswip_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_GSWIP;
}
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 24a5e99f7fd5..47d65b77caf7 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -645,7 +645,8 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
}
static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_KSZ8795;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 50ffc63d6231..9a51b8a4de5d 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -295,7 +295,8 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
}
static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
struct ksz_device *dev = ds->priv;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ed1ec10ec62b..022466ca1c19 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1223,7 +1223,8 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-mtk_get_tag_protocol(struct dsa_switch *ds, int port)
+mtk_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
struct mt7530_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a5a37f47b320..24b8219fd607 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -43,7 +43,8 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
}
static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_TRAILER;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3bd988529178..8c9289549688 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -340,11 +340,14 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
+ snprintf(chip->irq_name, sizeof(chip->irq_name),
+ "mv88e6xxx-%s", dev_name(chip->dev));
+
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
- dev_name(chip->dev), chip);
+ chip->irq_name, chip);
mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -2303,10 +2306,14 @@ static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
if (!irq)
return 0;
+ snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
+
/* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-serdes", dev_id);
+ IRQF_ONESHOT, dev_id->serdes_irq_name,
+ dev_id);
mv88e6xxx_reg_lock(chip);
if (err)
return err;
@@ -3838,6 +3845,9 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -3889,6 +3899,9 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3939,6 +3952,9 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -4085,6 +4101,9 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4424,6 +4443,9 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -4476,6 +4498,9 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5207,7 +5232,8 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
}
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
struct mv88e6xxx_chip *chip = ds->priv;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8a8e38bfb161..f332cb4b2fbf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -236,6 +236,7 @@ struct mv88e6xxx_port {
bool mirror_ingress;
bool mirror_egress;
unsigned int serdes_irq;
+ char serdes_irq_name[32];
};
struct mv88e6xxx_chip {
@@ -292,11 +293,16 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g1_irq;
struct mv88e6xxx_irq g2_irq;
int irq;
+ char irq_name[32];
int device_irq;
+ char device_irq_name[32];
int watchdog_irq;
+ char watchdog_irq_name[32];
int atu_prob_irq;
+ char atu_prob_irq_name[32];
int vtu_prob_irq;
+ char vtu_prob_irq_name[32];
struct kthread_worker *kworker;
struct kthread_delayed_work irq_poll_work;
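/* Illustrative note (not part of this patch): with the name buffers added
 * above, every chip instance registers its interrupts under a unique name
 * derived from dev_name(), e.g. "mv88e6xxx-<dev>-g2" rather than the shared
 * literal "mv88e6xxx-g2", so multiple switches can be told apart in
 * /proc/interrupts. The "<dev>" placeholder is hypothetical.
 */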
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index bdcd25560dd2..bac9a8a68e50 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -425,9 +425,12 @@ int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->atu_prob_irq < 0)
return chip->atu_prob_irq;
+ snprintf(chip->atu_prob_irq_name, sizeof(chip->atu_prob_irq_name),
+ "mv88e6xxx-%s-g1-atu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->atu_prob_irq, NULL,
mv88e6xxx_g1_atu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+ IRQF_ONESHOT, chip->atu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->atu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 33056a609e96..48390b7b18ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -631,9 +631,12 @@ int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->vtu_prob_irq < 0)
return chip->vtu_prob_irq;
+ snprintf(chip->vtu_prob_irq_name, sizeof(chip->vtu_prob_irq_name),
+ "mv88e6xxx-%s-g1-vtu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->vtu_prob_irq, NULL,
mv88e6xxx_g1_vtu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+ IRQF_ONESHOT, chip->vtu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->vtu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 87bfe7c8c9cd..01503014b1ee 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -948,10 +948,13 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
+ snprintf(chip->watchdog_irq_name, sizeof(chip->watchdog_irq_name),
+ "mv88e6xxx-%s-watchdog", dev_name(chip->dev));
+
err = request_threaded_irq(chip->watchdog_irq, NULL,
mv88e6xxx_g2_watchdog_thread_fn,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- "mv88e6xxx-watchdog", chip);
+ chip->watchdog_irq_name, chip);
if (err)
return err;
@@ -1114,9 +1117,12 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
goto out;
}
+ snprintf(chip->device_irq_name, sizeof(chip->device_irq_name),
+ "mv88e6xxx-%s-g2", dev_name(chip->dev));
+
err = request_threaded_irq(chip->device_irq, NULL,
mv88e6xxx_g2_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g2", chip);
+ IRQF_ONESHOT, chip->device_irq_name, chip);
if (err)
goto out;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 902feb398746..8d8b3b74aee1 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -405,22 +405,116 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
return err;
}
+struct mv88e6390_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int reg;
+};
+
+static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
+ { "serdes_rx_pkts", 0xf021 },
+ { "serdes_rx_bytes", 0xf024 },
+ { "serdes_rx_pkts_error", 0xf027 },
+};
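/* Illustrative note (not part of this patch): each statistic above is a
 * 48-bit counter spread over three consecutive 16-bit registers starting
 * at the listed address; mv88e6390_serdes_get_stat() below reassembles it.
 */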
+
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int i;
+
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ struct mv88e6390_serdes_hw_stat *stat)
+{
+ u16 reg[3];
+ int err, i;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ stat->reg + i, &reg[i]);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ }
+
+ return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+}
+
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int lane;
+ int i;
+
+ lane = mv88e6390_serdes_get_lane(chip, port);
+ if (lane == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ data[i] = mv88e6390_serdes_get_stat(chip, lane, stat);
+ }
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, reg);
+}
+
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
+ int err = 0;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, up);
+ err = mv88e6390_serdes_power_sgmii(chip, lane, up);
+ break;
case MV88E6XXX_PORT_STS_CMODE_XAUI:
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- return mv88e6390_serdes_power_10g(chip, lane, up);
+ err = mv88e6390_serdes_power_10g(chip, lane, up);
+ break;
}
- return 0;
+ if (!err && up)
+ err = mv88e6390_serdes_enable_checker(chip, lane);
+
+ return err;
}
static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index bd8df36ab537..d16ef4da20b0 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -74,6 +74,10 @@
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+/* Packet generator pad packet checker */
+#define MV88E6390_PG_CONTROL 0xf010
+#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
+
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
@@ -99,6 +103,11 @@ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 6f9804093150..a5b7cca03d09 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -3,8 +3,10 @@ config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
depends on NET_VENDOR_MICROSEMI
+ depends on NET_VENDOR_FREESCALE
select MSCC_OCELOT_SWITCH
select NET_DSA_TAG_OCELOT
+ select FSL_ENETC_MDIO
help
This driver supports the VSC9959 network switch, which is a member of
the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index b7f92464815d..3257962c147e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,16 +2,22 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/dsa.h>
#include "felix.h"
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_OCELOT;
}
@@ -26,14 +32,6 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
-static void felix_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct ocelot *ocelot = ds->priv;
-
- ocelot_adjust_link(ocelot, port, phydev);
-}
-
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -155,6 +153,141 @@ static void felix_port_disable(struct dsa_switch *ds, int port)
return ocelot_port_disable(ocelot, port);
}
+static void felix_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != ocelot_port->phy_mode) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* No half-duplex. */
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ /* The internal ports that run at 2.5G are overclocked GMII */
+ if (state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_link_state)
+ felix->info->pcs_link_state(ocelot, port, state);
+
+ return 0;
+}
+
+static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
+ u32 mac_fc_cfg;
+
+ /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
+ * PORT_RST bits in CLOCK_CFG
+ */
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(state->speed),
+ DEV_CLOCK_CFG);
+
+ /* Flow control. Link speed is only used here to evaluate the time
+ * specification in incoming pause frames.
+ */
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed);
+
+ /* Handle Rx pause in all cases; with 2500Base-X this is used for rate
+ * adaptation.
+ */
+ mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
+
+ if (state->pause & MLO_PAUSE_TX)
+ mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
+ ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
+
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+
+ if (felix->info->pcs_init)
+ felix->info->pcs_init(ocelot, port, link_an_mode, state);
+}
+
+static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_an_restart)
+ felix->info->pcs_an_restart(ocelot, port);
+}
+
+static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
+static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ /* Enable MAC module */
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -185,10 +318,76 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static int felix_parse_ports_node(struct felix *felix,
+ struct device_node *ports_node,
+ phy_interface_t *port_phy_modes)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ phy_interface_t phy_mode;
+ u32 port;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &port) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ port);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ if (err < 0) {
+ dev_err(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
+ return err;
+ }
+
+ port_phy_modes[port] = phy_mode;
+ }
+
+ return 0;
+}
+
+static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
+{
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *switch_node;
+ struct device_node *ports_node;
+ int err;
+
+ switch_node = dev->of_node;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
+ of_node_put(ports_node);
+
+ return err;
+}
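/* Illustrative sketch (not part of this patch): the device tree layout that
 * felix_parse_dt() expects. The port numbers and PHY modes below are
 * hypothetical.
 *
 *	ports {
 *		port@0 {
 *			reg = <0>;
 *			phy-mode = "qsgmii";
 *		};
 *		port@4 {
 *			reg = <4>;
 *			phy-mode = "gmii";
 *		};
 *	};
 *
 * Each "reg" value indexes into port_phy_modes[], and each phy-mode string
 * is checked by the info->prevalidate_phy_mode() hook before being accepted.
 */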
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
- resource_size_t base;
+ phy_interface_t *port_phy_modes;
+ resource_size_t switch_base;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -203,7 +402,19 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->ops = felix->info->ops;
- base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+ port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
+ GFP_KERNEL);
+ if (!port_phy_modes)
+ return -ENOMEM;
+
+ err = felix_parse_dt(felix, port_phy_modes);
+ if (err) {
+ kfree(port_phy_modes);
+ return err;
+ }
+
+ switch_base = pci_resource_start(felix->pdev,
+ felix->info->switch_pci_bar);
for (i = 0; i < TARGET_MAX; i++) {
struct regmap *target;
@@ -214,13 +425,14 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res = &felix->info->target_io_res[i];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
+ kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -230,6 +442,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
err = ocelot_regfields_init(ocelot, felix->info->regfields);
if (err) {
dev_err(ocelot->dev, "failed to init reg fields map\n");
+ kfree(port_phy_modes);
return err;
}
@@ -244,26 +457,37 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
if (!ocelot_port) {
dev_err(ocelot->dev,
"failed to allocate port memory\n");
+ kfree(port_phy_modes);
return -ENOMEM;
}
res = &felix->info->port_io_res[port];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
port_regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(port_regs)) {
dev_err(ocelot->dev,
"failed to map registers for port %d\n", port);
+ kfree(port_phy_modes);
return PTR_ERR(port_regs);
}
+ ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->regs = port_regs;
ocelot->ports[port] = ocelot_port;
}
+ kfree(port_phy_modes);
+
+ if (felix->info->mdio_bus_alloc) {
+ err = felix->info->mdio_bus_alloc(ocelot);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
@@ -293,12 +517,22 @@ static int felix_setup(struct dsa_switch *ds)
OCELOT_TAG_PREFIX_LONG);
}
+ /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
+ * isn't instantiated for the Felix PF.
+ * In-band AN may take a few ms to complete, so we need to poll.
+ */
+ ds->pcs_poll = true;
+
return 0;
}
static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
@@ -369,7 +603,12 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
- .adjust_link = felix_adjust_link,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_an_restart = felix_phylink_mac_an_restart,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 204296e51d0c..3a7580015b62 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -10,6 +10,7 @@
struct felix_info {
struct resource *target_io_res;
struct resource *port_io_res;
+ struct resource *imdio_res;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -17,7 +18,18 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int pci_bar;
+ int switch_pci_bar;
+ int imdio_pci_bar;
+ int (*mdio_bus_alloc)(struct ocelot *ocelot);
+ void (*mdio_bus_free)(struct ocelot *ocelot);
+ void (*pcs_init)(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state);
+ void (*pcs_an_restart)(struct ocelot *ocelot, int port);
+ void (*pcs_link_state)(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state);
+ int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode);
};
extern struct felix_info felix_info_vsc9959;
@@ -32,6 +44,8 @@ struct felix {
struct pci_dev *pdev;
struct felix_info *info;
struct ocelot ocelot;
+ struct mii_bus *imdio;
+ struct phy_device **pcs;
};
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index b9758b0d18c7..2c812b481778 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2,12 +2,33 @@
/* Copyright 2017 Microsemi Corporation
* Copyright 2018-2019 NXP Semiconductors
*/
+#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+/* TODO: should find a better place for these */
+#define USXGMII_BMCR_RESET BIT(15)
+#define USXGMII_BMCR_AN_EN BIT(12)
+#define USXGMII_BMCR_RST_AN BIT(9)
+#define USXGMII_BMSR_LNKS(status) (((status) & GENMASK(2, 2)) >> 2)
+#define USXGMII_BMSR_AN_CMPL(status) (((status) & GENMASK(5, 5)) >> 5)
+#define USXGMII_ADVERTISE_LNKS(x) (((x) << 15) & BIT(15))
+#define USXGMII_ADVERTISE_FDX BIT(12)
+#define USXGMII_ADVERTISE_SPEED(x) (((x) << 9) & GENMASK(11, 9))
+#define USXGMII_LPA_LNKS(lpa) ((lpa) >> 15)
+#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12)
+#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9)
+
+enum usxgmii_speed {
+ USXGMII_SPEED_10 = 0,
+ USXGMII_SPEED_100 = 1,
+ USXGMII_SPEED_1000 = 2,
+ USXGMII_SPEED_2500 = 4,
+};
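/* Illustrative sketch (not part of this patch): how the helpers above would
 * decode a USXGMII link partner ability word. The function name and the
 * example LPA value are hypothetical.
 */
static void usxgmii_decode_lpa_example(u16 lpa)
{
	unsigned int link = USXGMII_LPA_LNKS(lpa);     /* bit 15 */
	unsigned int duplex = USXGMII_LPA_DUPLEX(lpa); /* bit 12 */
	unsigned int speed = USXGMII_LPA_SPEED(lpa);   /* bits 11:9 */

	/* For lpa = 0x9400: link = 1, duplex = 1 (full),
	 * speed = USXGMII_SPEED_1000 (2).
	 */
	pr_info("link=%u duplex=%u speed=%u\n", link, duplex, speed);
}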
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -386,6 +407,15 @@ static struct resource vsc9959_port_io_res[] = {
},
};
+/* Internal MDIO bus of Port MAC 0, through which the SerDes lanes acting
+ * as SGMII/QSGMII MAC PCS can be found.
+ */
+static struct resource vsc9959_imdio_res = {
+ .start = 0x8030,
+ .end = 0x8040,
+ .name = "imdio",
+};
+
static const struct reg_field vsc9959_regfields[] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
@@ -565,13 +595,495 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
+static void vsc9959_pcs_an_restart_sgmii(struct phy_device *pcs)
+{
+ phy_set_bits(pcs, MII_BMCR, BMCR_ANRESTART);
+}
+
+static void vsc9959_pcs_an_restart_usxgmii(struct phy_device *pcs)
+{
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
+ USXGMII_BMCR_RESET |
+ USXGMII_BMCR_AN_EN |
+ USXGMII_BMCR_RST_AN);
+}
+
+static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_an_restart_sgmii(pcs);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_an_restart_usxgmii(pcs);
+ break;
+ default:
+ dev_err(ocelot->dev, "Invalid PCS interface type %s\n",
+ phy_modes(pcs->interface));
+ break;
+ }
+}
+
+/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
+ * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
+ * into the PCS, which is retrieved out-of-band over MDIO. This also has the
+ * benefit of working with SGMII fixed-links, like downstream switches, where
+ * both link partners attempt to operate as AN slaves and therefore AN never
+ * completes. But it also has the disadvantage that some PHY chips don't pass
+ * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
+ * setting MLO_AN_INBAND is actually required for those.
+ */
+static void vsc9959_pcs_init_sgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ int bmsr, bmcr;
+
+ /* Some PHYs like VSC8234 don't like it when AN restarts on
+ * their system side and they restart line side AN too, going
+ * into an endless link up/down loop. Don't restart PCS AN if
+ * link is up already.
+ * We do check that AN is enabled, just in case this is the first
+ * call and the PCS detects a carrier but AN was left disabled from
+ * power-on or by the boot loader.
+ */
+ bmcr = phy_read(pcs, MII_BMCR);
+ if (bmcr < 0)
+ return;
+
+ bmsr = phy_read(pcs, MII_BMSR);
+ if (bmsr < 0)
+ return;
+
+ if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
+ return;
+
+ /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
+ * for the MAC PCS in order to acknowledge the AN.
+ */
+ phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
+ ADVERTISE_LPACK);
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_USE_SGMII_AN);
+
+ /* Adjust link timer for SGMII */
+ phy_write(pcs, ENETC_PCS_LINK_TIMER1,
+ ENETC_PCS_LINK_TIMER1_VAL);
+ phy_write(pcs, ENETC_PCS_LINK_TIMER2,
+ ENETC_PCS_LINK_TIMER2_VAL);
+
+ phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+ } else {
+ int speed;
+
+ if (state->duplex == DUPLEX_HALF) {
+ phydev_err(pcs, "Half duplex not supported\n");
+ return;
+ }
+ switch (state->speed) {
+ case SPEED_1000:
+ speed = ENETC_PCS_SPEED_1000;
+ break;
+ case SPEED_100:
+ speed = ENETC_PCS_SPEED_100;
+ break;
+ case SPEED_10:
+ speed = ENETC_PCS_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ phydev_err(pcs, "Invalid PCS speed %d\n", state->speed);
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(speed));
+
+ /* Yes, not a mistake: speed is given by IF_MODE. */
+ phy_write(pcs, MII_BMCR, BMCR_RESET |
+ BMCR_SPEED1000 |
+ BMCR_FULLDPLX);
+ }
+}
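/* Illustrative note (not part of this patch): phylink selects MLO_AN_INBAND
 * for a port whose device tree node contains managed = "in-band-status";
 * otherwise (fixed-link or MDIO-attached PHY) the else branch above programs
 * the speed into the PCS out-of-band, as the comment before this function
 * describes.
 */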
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
+ * because the clock frequency is actually given by a PLL configured in the
+ * Reset Configuration Word (RCW).
+ * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
+ * lower link speed on line side, the system-side interface remains fixed at
+ * 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void vsc9959_pcs_init_2500basex(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+
+ phy_write(pcs, MII_BMCR, BMCR_SPEED1000 |
+ BMCR_FULLDPLX |
+ BMCR_RESET);
+}
+
+static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode != MLO_AN_INBAND) {
+ phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
+ return;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
+ USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) |
+ USXGMII_ADVERTISE_LNKS(1) |
+ ADVERTISE_SGMII |
+ ADVERTISE_LPACK |
+ USXGMII_ADVERTISE_FDX);
+}
+
+static void vsc9959_pcs_init(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ /* The PCS does not implement the BMSR register fully, so capability
+ * detection via genphy_read_abilities does not work. Since we can get
+ * the PHY config word from the LPA register though, there is still
+ * value in using the generic phy_resolve_aneg_linkmode function. So
+ * populate the supported and advertising link modes manually here.
+ */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
+ if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ pcs->interface == PHY_INTERFACE_MODE_USXGMII)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ pcs->supported);
+ if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pcs->supported);
+ phy_advertise_supported(pcs);
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_init_sgmii(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_init_2500basex(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_init_usxgmii(pcs, link_an_mode, state);
+ break;
+ default:
+ dev_err(ocelot->dev, "Unsupported link mode %s\n",
+ phy_modes(pcs->interface));
+ }
+}
+
+static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ state->an_complete = pcs->autoneg_complete;
+ state->an_enabled = pcs->autoneg;
+ state->link = pcs->link;
+ state->duplex = pcs->duplex;
+ state->speed = pcs->speed;
+ /* SGMII AN does not negotiate flow control, but that's ok,
+ * since phylink already knows that, and does:
+ * link_state.pause |= pl->phy_state.pause;
+ */
+ state->pause = MLO_PAUSE_NONE;
+
+ phydev_dbg(pcs,
+ "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(pcs->interface),
+ phy_speed_to_str(pcs->speed),
+ phy_duplex_to_str(pcs->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
+ pcs->link, pcs->autoneg, pcs->autoneg_complete);
+}
+
+static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
+ if (pcs->autoneg_complete) {
+ u16 lpa = phy_read(pcs, MII_LPA);
+
+ mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
+
+ phy_resolve_aneg_linkmode(pcs);
+ }
+}
+
+static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
+ pcs->speed = SPEED_2500;
+ pcs->asym_pause = true;
+ pcs->pause = true;
+}
+
+static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int status, lpa;
+
+ status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ pcs->autoneg = true;
+ pcs->autoneg_complete = USXGMII_BMSR_AN_CMPL(status);
+ pcs->link = USXGMII_BMSR_LNKS(status);
+
+ if (!pcs->link || !pcs->autoneg_complete)
+ return;
+
+ lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ switch (USXGMII_LPA_SPEED(lpa)) {
+ case USXGMII_SPEED_10:
+ pcs->speed = SPEED_10;
+ break;
+ case USXGMII_SPEED_100:
+ pcs->speed = SPEED_100;
+ break;
+ case USXGMII_SPEED_1000:
+ pcs->speed = SPEED_1000;
+ break;
+ case USXGMII_SPEED_2500:
+ pcs->speed = SPEED_2500;
+ break;
+ default:
+ break;
+ }
+
+ if (USXGMII_LPA_DUPLEX(lpa))
+ pcs->duplex = DUPLEX_FULL;
+ else
+ pcs->duplex = DUPLEX_HALF;
+}
+
+static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ pcs->speed = SPEED_UNKNOWN;
+ pcs->duplex = DUPLEX_UNKNOWN;
+ pcs->pause = 0;
+ pcs->asym_pause = 0;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_link_state_sgmii(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_link_state_2500basex(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_link_state_usxgmii(pcs, state);
+ break;
+ default:
+ return;
+ }
+
+ vsc9959_pcs_link_state_resolve(pcs, state);
+}
+
+static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode)
+{
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_GMII:
+ /* Only supported on internal to-CPU ports */
+ if (port != 4 && port != 5)
+ return -ENOTSUPP;
+ return 0;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* Not supported on internal to-CPU ports */
+ if (port == 4 || port == 5)
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
};
+static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
+ void __iomem *imdio_regs;
+ struct resource *res;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int port;
+ int rc;
+
+ felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+ sizeof(struct phy_device *),
+ GFP_KERNEL);
+ if (!felix->pcs) {
+ dev_err(dev, "failed to allocate array for PCS PHYs\n");
+ return -ENOMEM;
+ }
+
+ imdio_base = pci_resource_start(felix->pdev,
+ felix->info->imdio_pci_bar);
+
+ res = felix->info->imdio_res;
+ res->flags = IORESOURCE_MEM;
+ res->start += imdio_base;
+ res->end += imdio_base;
+
+ imdio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(imdio_regs)) {
+ dev_err(dev, "failed to map internal MDIO registers\n");
+ return PTR_ERR(imdio_regs);
+ }
+
+ hw = enetc_hw_alloc(dev, imdio_regs);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to allocate ENETC HW structure\n");
+ return PTR_ERR(hw);
+ }
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "VSC9959 internal MDIO bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ /* This gets added to imdio_regs, which already maps addresses
+ * starting with the proper offset.
+ */
+ mdio_priv->mdio_base = 0;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ /* Needed in order to initialize the bus mutex lock */
+ rc = mdiobus_register(bus);
+ if (rc < 0) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ return rc;
+ }
+
+ felix->imdio = bus;
+
+ for (port = 0; port < felix->info->num_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phy_device *pcs;
+ bool is_c45 = false;
+
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ is_c45 = true;
+
+ pcs = get_phy_device(felix->imdio, port, is_c45);
+ if (IS_ERR(pcs))
+ continue;
+
+ pcs->interface = ocelot_port->phy_mode;
+ felix->pcs[port] = pcs;
+
+ dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
+ }
+
+ return 0;
+}
+
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ put_device(&pcs->mdio.dev);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
+ .imdio_res = &vsc9959_imdio_res,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -579,5 +1091,12 @@ struct felix_info felix_info_vsc9959 = {
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.shared_queue_sz = 128 * 1024,
.num_ports = 6,
- .pci_bar = 4,
+ .switch_pci_bar = 4,
+ .imdio_pci_bar = 0,
+ .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
+ .mdio_bus_free = vsc9959_mdio_bus_free,
+ .pcs_init = vsc9959_pcs_init,
+ .pcs_an_restart = vsc9959_pcs_an_restart,
+ .pcs_link_state = vsc9959_pcs_link_state,
+ .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
};
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
new file mode 100644
index 000000000000..e3c8d715a18f
--- /dev/null
+++ b/drivers/net/dsa/qca/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_AR9331
+ tristate "Qualcomm Atheros AR9331 Ethernet switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_AR9331
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
+ switch.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
new file mode 100644
index 000000000000..274022319066
--- /dev/null
+++ b/drivers/net/dsa/qca/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
new file mode 100644
index 000000000000..de25f99e995a
--- /dev/null
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2019 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+/*
+ * +----------------------+
+ * GMAC1----RGMII----|--MAC0 |
+ * \---MDIO1----|--REGs |----MDIO3----\
+ * | | | +------+
+ * | | +--| |
+ * | MAC1-|----RMII--M-----| PHY0 |-o P0
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC2-|----RMII--------| PHY1 |-o P1
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC3-|----RMII--------| PHY2 |-o P2
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC4-|----RMII--------| PHY3 |-o P3
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC5-|--+-RMII--M-----|-PHY4-|-o P4
+ * | | | | +------+
+ * +----------------------+ | \--CFG_SW_PHY_SWAP
+ * GMAC0---------------RMII--------------------/ \-CFG_SW_PHY_ADDR_SWAP
+ * \---MDIO0--NC
+ *
+ * GMAC0 and MAC5 are connected together and use the same PHY. Depending on
+ * the configuration, it can be PHY4 (default) or PHY0. Only GMAC0 or MAC5 can
+ * be used at a time. If GMAC0 is used (default), MAC5 should be disabled.
+ *
+ * CFG_SW_PHY_SWAP - swap the connections of PHY0 and PHY4. If this bit is not
+ * set, PHY4 is connected to the GMAC0/MAC5 bundle and PHY0 is connected to
+ * MAC1. If this bit is set, PHY4 is connected to MAC1 and PHY0 is connected
+ * to the GMAC0/MAC5 bundle.
+ *
+ * CFG_SW_PHY_ADDR_SWAP - swap addresses of PHY0 and PHY4
+ *
+ * CFG_SW_PHY_SWAP and CFG_SW_PHY_ADDR_SWAP are part of the SoC-specific
+ * register set and are not related to the switch's internal registers.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AR9331_SW_NAME "ar9331_switch"
+#define AR9331_SW_PORTS 6
+
+/* dummy reg to change page */
+#define AR9331_SW_REG_PAGE 0x40000
+
+/* Global Interrupt */
+#define AR9331_SW_REG_GINT 0x10
+#define AR9331_SW_REG_GINT_MASK 0x14
+#define AR9331_SW_GINT_PHY_INT BIT(2)
+
+#define AR9331_SW_REG_FLOOD_MASK 0x2c
+#define AR9331_SW_FLOOD_MASK_BROAD_TO_CPU BIT(26)
+
+#define AR9331_SW_REG_GLOBAL_CTRL 0x30
+#define AR9331_SW_GLOBAL_CTRL_MFS_M GENMASK(13, 0)
+
+#define AR9331_SW_REG_MDIO_CTRL 0x98
+#define AR9331_SW_MDIO_CTRL_BUSY BIT(31)
+#define AR9331_SW_MDIO_CTRL_MASTER_EN BIT(30)
+#define AR9331_SW_MDIO_CTRL_CMD_READ BIT(27)
+#define AR9331_SW_MDIO_CTRL_PHY_ADDR_M GENMASK(25, 21)
+#define AR9331_SW_MDIO_CTRL_REG_ADDR_M GENMASK(20, 16)
+#define AR9331_SW_MDIO_CTRL_DATA_M GENMASK(16, 0)
+
+#define AR9331_SW_REG_PORT_STATUS(_port) (0x100 + (_port) * 0x100)
+
+/* FLOW_LINK_EN - If set, the MAC flow control configuration is taken from
+ * the auto-negotiation result with the PHY. If not set, the MAC can be
+ * configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_FLOW_LINK_EN BIT(12)
+
+/* LINK_EN - If set, MAC is configured from PHY link status.
+ * If not set, MAC should be configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_LINK_EN BIT(9)
+#define AR9331_SW_PORT_STATUS_DUPLEX_MODE BIT(6)
+#define AR9331_SW_PORT_STATUS_RX_FLOW_EN BIT(5)
+#define AR9331_SW_PORT_STATUS_TX_FLOW_EN BIT(4)
+#define AR9331_SW_PORT_STATUS_RXMAC BIT(3)
+#define AR9331_SW_PORT_STATUS_TXMAC BIT(2)
+#define AR9331_SW_PORT_STATUS_SPEED_M GENMASK(1, 0)
+#define AR9331_SW_PORT_STATUS_SPEED_1000 2
+#define AR9331_SW_PORT_STATUS_SPEED_100 1
+#define AR9331_SW_PORT_STATUS_SPEED_10 0
+
+#define AR9331_SW_PORT_STATUS_MAC_MASK \
+ (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
+
+#define AR9331_SW_PORT_STATUS_LINK_MASK \
+ (AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN | \
+ AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
+ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
+ AR9331_SW_PORT_STATUS_SPEED_M)
+
+/* Phy bypass mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ *
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b00 |PhyAdd[2:0]| Reg Addr[4:0] | TA |
+ *
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ *
+ * ------------------------------------------------------------------------
+ * Page address mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b11 | 8'b0 | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| | Page [9:0] |
+ */
+/* In Page Address mode, bits [18:9] of the 32-bit register address are
+ * written to bits [9:0] of the MDIO data register.
+ */
+#define AR9331_SW_ADDR_PAGE GENMASK(18, 9)
+
+/* ------------------------------------------------------------------------
+ * Normal register access mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b10 | low_addr[7:0] | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ * ------------------------------------------------------------------------
+ */
+#define AR9331_SW_LOW_ADDR_PHY GENMASK(8, 6)
+#define AR9331_SW_LOW_ADDR_REG GENMASK(5, 1)
+
+#define AR9331_SW_MDIO_PHY_MODE_M GENMASK(4, 3)
+#define AR9331_SW_MDIO_PHY_MODE_PAGE 3
+#define AR9331_SW_MDIO_PHY_MODE_REG 2
+#define AR9331_SW_MDIO_PHY_MODE_BYPASS 0
+#define AR9331_SW_MDIO_PHY_ADDR_M GENMASK(2, 0)
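/* Illustrative sketch (not part of this patch): how a 32-bit switch register
 * address would be split for MDIO access under the scheme described above.
 * The helper name is hypothetical; linux/bitfield.h is already included.
 */
static void ar9331_sw_split_addr_example(u32 reg)
{
	u16 page = FIELD_GET(AR9331_SW_ADDR_PAGE, reg);
	u8 phy = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M,
			    AR9331_SW_MDIO_PHY_MODE_REG) |
		 FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
	u8 r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);

	/* First select "page" with a page-address-mode write, then access the
	 * two 16-bit data halves at PHY address "phy", register address "r",
	 * in normal register access mode. E.g. AR9331_SW_REG_MDIO_CTRL (0x98)
	 * splits into page 0x000, phy 0x12, reg 0x0c.
	 */
	pr_info("page=0x%03x phy=0x%02x reg=0x%02x\n", page, phy, r);
}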
+
+/* Empirically determined values */
+#define AR9331_SW_MDIO_POLL_SLEEP_US 1
+#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+
+struct ar9331_sw_priv {
+ struct device *dev;
+ struct dsa_switch ds;
+ struct dsa_switch_ops ops;
+ struct irq_domain *irqdomain;
+ struct mii_bus *mbus; /* mdio master */
+ struct mii_bus *sbus; /* mdio slave */
+ struct regmap *regmap;
+ struct reset_control *sw_reset;
+};
+
+/* Warning: a switch reset also resets the last AR9331_SW_MDIO_PHY_MODE_PAGE
+ * request. If any caching of the selected page is done as an optimization,
+ * the page request must be repeated after a reset.
+ */
+static int ar9331_sw_reset(struct ar9331_sw_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->sw_reset);
+ if (ret)
+ goto error;
+
+ /* The AR9331 documentation does not provide any information about the
+ * proper reset sequence. The AR8136 (the closest switch to the AR9331)
+ * documentation says the reset duration should be greater than 10 ms,
+ * so let's use this value for now.
+ */
+ usleep_range(10000, 15000);
+ ret = reset_control_deassert(priv->sw_reset);
+ if (ret)
+ goto error;
+ /* There is no information on how long we should wait after the reset.
+ * The AR8136 has an EEPROM and an interrupt for the EEPROM load status;
+ * the AR9331 has no EEPROM support. For now, do not wait. If AR8136
+ * support is ever needed, a post-reset delay can be added then.
+ */
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_write(struct mii_bus *mbus, int port, int regnum,
+ u16 data)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_DATA_M, data));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "PHY write error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_read(struct mii_bus *mbus, int port, int regnum)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ AR9331_SW_MDIO_CTRL_CMD_READ |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_MDIO_CTRL, &val);
+ if (ret)
+ goto error;
+
+ return FIELD_GET(AR9331_SW_MDIO_CTRL_DATA_M, val);
+
+error:
+ dev_err_ratelimited(priv->dev, "PHY read error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct mii_bus *mbus;
+ struct device_node *np, *mnp;
+ int ret;
+
+ np = dev->of_node;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = np->full_name;
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%pOF", np);
+
+ mbus->read = ar9331_sw_mbus_read;
+ mbus->write = ar9331_sw_mbus_write;
+ mbus->priv = priv;
+ mbus->parent = dev;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ return -ENODEV;
+
+ ret = of_mdiobus_register(mbus, mnp);
+ of_node_put(mnp);
+ if (ret)
+ return ret;
+
+ priv->mbus = mbus;
+
+ return 0;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = ar9331_sw_reset(priv);
+ if (ret)
+ return ret;
+
+ /* The reset sets proper defaults: the CPU port (port 0) is enabled and
+ * configured, and all other ports (ports 1-5) are disabled.
+ */
+ ret = ar9331_sw_mbus_init(priv);
+ if (ret)
+ return ret;
+
+ /* Do not drop broadcast frames */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_FLOOD_MASK,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU);
+ if (ret)
+ goto error;
+
+ /* Set max frame size to the maximum supported value */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_GLOBAL_CTRL,
+ AR9331_SW_GLOBAL_CTRL_MFS_M,
+ AR9331_SW_GLOBAL_CTRL_MFS_M);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_STATUS(port), 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_AR9331;
+}
+
+static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
+ state->interface, port);
+}
+
+static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+ u32 val;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ val = AR9331_SW_PORT_STATUS_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = AR9331_SW_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_10:
+ val = AR9331_SW_PORT_STATUS_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ if (state->duplex)
+ val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
+
+ if (state->pause & MLO_PAUSE_TX)
+ val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
+
+ if (state->pause & MLO_PAUSE_RX)
+ val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_LINK_MASK, val);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK, 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK,
+ AR9331_SW_PORT_STATUS_MAC_MASK);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static const struct dsa_switch_ops ar9331_sw_ops = {
+ .get_tag_protocol = ar9331_sw_get_tag_protocol,
+ .setup = ar9331_sw_setup,
+ .port_disable = ar9331_sw_port_disable,
+ .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_mac_config = ar9331_sw_phylink_mac_config,
+ .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
+static irqreturn_t ar9331_sw_irq(int irq, void *data)
+{
+ struct ar9331_sw_priv *priv = data;
+ struct regmap *regmap = priv->regmap;
+ u32 stat;
+ int ret;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_GINT, &stat);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & AR9331_SW_GINT_PHY_INT) {
+ int child_irq;
+
+ child_irq = irq_find_mapping(priv->irqdomain, 0);
+ handle_nested_irq(child_irq);
+ }
+
+ ret = regmap_write(regmap, AR9331_SW_REG_GINT, stat);
+ if (ret) {
+ dev_err(priv->dev, "can't write interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ar9331_sw_mask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT, 0);
+ if (ret)
+ dev_err(priv->dev, "could not mask IRQ\n");
+}
+
+static void ar9331_sw_unmask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT,
+ AR9331_SW_GINT_PHY_INT);
+ if (ret)
+ dev_err(priv->dev, "could not unmask IRQ\n");
+}
+
+static struct irq_chip ar9331_sw_irq_chip = {
+ .name = AR9331_SW_NAME,
+ .irq_mask = ar9331_sw_mask_irq,
+ .irq_unmask = ar9331_sw_unmask_irq,
+};
+
+static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &ar9331_sw_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void ar9331_sw_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ar9331_sw_irqdomain_ops = {
+ .map = ar9331_sw_irq_map,
+ .unmap = ar9331_sw_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ int ret, irq;
+
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
+ IRQF_ONESHOT, AR9331_SW_NAME, priv);
+ if (ret) {
+ dev_err(dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+
+ priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_parent(irq_create_mapping(priv->irqdomain, 0), irq);
+
+ return 0;
+}
+
+static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, mode) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_write(sbus, p, r, val);
+}
+
+static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_read(sbus, p, r);
+}
+
+static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ar9331_sw_priv *priv = ctx;
+ struct mii_bus *sbus = priv->sbus;
+ u32 reg = *(u32 *)reg_buf;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ /* We cannot read the page selector register from hardware and
+ * we cache its value in regmap. Return all bits set here, so that
+ * that regmap will always write the page on first use.
+ */
+ *(u32 *)val_buf = GENMASK(9, 0);
+ return 0;
+ }
+
+ ret = __ar9331_mdio_read(sbus, reg);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf = ret;
+ ret = __ar9331_mdio_read(sbus, reg + 2);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf |= ret << 16;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+ return ret;
+}
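
The page-selector trick above leans on regmap's indirect-range support, configured further down in ar9331_regmap_range. A minimal sketch of the address arithmetic regmap applies, assuming its usual (reg / window_len) paging with the window length of 512 used there; the function name is illustrative only, since the real paging happens inside regmap before ar9331_mdio_read()/ar9331_mdio_write() ever see the address:

/* Illustrative only: how a flat 32-bit switch address decomposes into a
 * page (written to the cached AR9331_SW_REG_PAGE selector, and skipped
 * when the cached page already matches) and an offset inside the
 * 512-byte window handed to the low-level MDIO accessors.
 */
static void ar9331_page_math_example(u32 reg)
{
	u32 page = reg / 512;	/* selector value */
	u32 offset = reg % 512;	/* low address seen by __ar9331_mdio_read/write */

	/* e.g. reg 0x20304 -> page 0x101, offset 0x104 */
	pr_info("reg 0x%x -> page 0x%x, offset 0x%x\n", reg, page, offset);
}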
+
+static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ctx;
+ struct mii_bus *sbus = priv->sbus;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
+ 0, val);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+ }
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+ if (ret < 0)
+ goto error;
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+ val >> 16);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+ return ret;
+}
+
+static int ar9331_sw_bus_write(void *context, const void *data, size_t count)
+{
+ u32 reg = *(u32 *)data;
+ u32 val = *((u32 *)data + 1);
+
+ return ar9331_mdio_write(context, reg, val);
+}
+
+static const struct regmap_range ar9331_valid_regs[] = {
+ regmap_reg_range(0x0, 0x0),
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x20, 0x24),
+ regmap_reg_range(0x2c, 0x30),
+ regmap_reg_range(0x40, 0x44),
+ regmap_reg_range(0x50, 0x78),
+ regmap_reg_range(0x80, 0x98),
+
+ regmap_reg_range(0x100, 0x120),
+ regmap_reg_range(0x200, 0x220),
+ regmap_reg_range(0x300, 0x320),
+ regmap_reg_range(0x400, 0x420),
+ regmap_reg_range(0x500, 0x520),
+ regmap_reg_range(0x600, 0x620),
+
+ regmap_reg_range(0x20000, 0x200a4),
+ regmap_reg_range(0x20100, 0x201a4),
+ regmap_reg_range(0x20200, 0x202a4),
+ regmap_reg_range(0x20300, 0x203a4),
+ regmap_reg_range(0x20400, 0x204a4),
+ regmap_reg_range(0x20500, 0x205a4),
+
+ /* dummy page selector reg */
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range ar9331_nonvolatile_regs[] = {
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range_cfg ar9331_regmap_range[] = {
+ {
+ .selector_reg = AR9331_SW_REG_PAGE,
+ .selector_mask = GENMASK(9, 0),
+ .selector_shift = 0,
+
+ .window_start = 0,
+ .window_len = 512,
+
+ .range_min = 0,
+ .range_max = AR9331_SW_REG_PAGE - 4,
+ },
+};
+
+static const struct regmap_access_table ar9331_register_set = {
+ .yes_ranges = ar9331_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ar9331_valid_regs),
+};
+
+static const struct regmap_access_table ar9331_volatile_set = {
+ .no_ranges = ar9331_nonvolatile_regs,
+ .n_no_ranges = ARRAY_SIZE(ar9331_nonvolatile_regs),
+};
+
+static const struct regmap_config ar9331_mdio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AR9331_SW_REG_PAGE,
+
+ .ranges = ar9331_regmap_range,
+ .num_ranges = ARRAY_SIZE(ar9331_regmap_range),
+
+ .volatile_table = &ar9331_volatile_set,
+ .wr_table = &ar9331_register_set,
+ .rd_table = &ar9331_register_set,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct regmap_bus ar9331_sw_bus = {
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .read = ar9331_mdio_read,
+ .write = ar9331_sw_bus_write,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+};
+
+static int ar9331_sw_probe(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv;
+ struct dsa_switch *ds;
+ int ret;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, &ar9331_sw_bus, priv,
+ &ar9331_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->sw_reset = devm_reset_control_get(&mdiodev->dev, "switch");
+ if (IS_ERR(priv->sw_reset)) {
+ dev_err(&mdiodev->dev, "missing switch reset\n");
+ return PTR_ERR(priv->sw_reset);
+ }
+
+ priv->sbus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+
+ ret = ar9331_sw_irq_init(priv);
+ if (ret)
+ return ret;
+
+ ds = &priv->ds;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = AR9331_SW_PORTS;
+ ds->priv = priv;
+ priv->ops = ar9331_sw_ops;
+ ds->ops = &priv->ops;
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = dsa_register_switch(ds);
+ if (ret)
+ goto err_remove_irq;
+
+ return 0;
+
+err_remove_irq:
+ irq_domain_remove(priv->irqdomain);
+
+ return ret;
+}
+
+static void ar9331_sw_remove(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ irq_domain_remove(priv->irqdomain);
+ mdiobus_unregister(priv->mbus);
+ dsa_unregister_switch(&priv->ds);
+
+ reset_control_assert(priv->sw_reset);
+}
+
+static const struct of_device_id ar9331_sw_of_match[] = {
+ { .compatible = "qca,ar9331-switch" },
+ { },
+};
+
+static struct mdio_driver ar9331_sw_mdio_driver = {
+ .probe = ar9331_sw_probe,
+ .remove = ar9331_sw_remove,
+ .mdiodrv.driver = {
+ .name = AR9331_SW_NAME,
+ .of_match_table = ar9331_sw_of_match,
+ },
+};
+
+mdio_module_driver(ar9331_sw_mdio_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for Atheros AR9331 switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index e548289df31e..9f4205b4439b 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1017,7 +1017,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port)
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index f5cc8b0a7c74..fd1977590cb4 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -964,7 +964,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
/* For now, the RTL switches are handled without any custom tags.
*
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 1da5ac111499..03ba6d25f7fe 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -426,14 +426,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.tpid2 = ETH_P_SJA1105,
};
struct sja1105_table *table;
- int i, k = 0;
-
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (dsa_is_dsa_port(priv->ds, i))
- default_general_params.casc_port = i;
- else if (dsa_is_user_port(priv->ds, i))
- priv->ports[i].mgmt_slot = k++;
- }
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -582,7 +574,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device *dev = &priv->spidev->dev;
struct device_node *child;
- for_each_child_of_node(ports_node, child) {
+ for_each_available_child_of_node(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1542,7 +1534,8 @@ static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
}
static enum dsa_tag_protocol
-sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_SJA1105;
}
@@ -1740,6 +1733,16 @@ static int sja1105_setup(struct dsa_switch *ds)
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ int port;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
@@ -1761,6 +1764,18 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
return 0;
}
+static void sja1105_port_disable(struct dsa_switch *ds, int port)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ kthread_cancel_work_sync(&sp->xmit_work);
+ skb_queue_purge(&sp->xmit_queue);
+}
+
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -1819,47 +1834,36 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
+#define work_to_port(work) \
+ container_of((work), struct sja1105_port, xmit_work)
+#define tagger_to_sja1105(t) \
+ container_of((t), struct sja1105_private, tagger_data)
+
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
* lock on the bus)
*/
-static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
- struct sk_buff *skb)
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
- int slot = sp->mgmt_slot;
- struct sk_buff *clone;
-
- /* The tragic fact about the switch having 4x2 slots for installing
- * management routes is that all of them except one are actually
- * useless.
- * If 2 slots are simultaneously configured for two BPDUs sent to the
- * same (multicast) DMAC but on different egress ports, the switch
- * would confuse them and redirect first frame it receives on the CPU
- * port towards the port configured on the numerically first slot
- * (therefore wrong port), then second received frame on second slot
- * (also wrong port).
- * So for all practical purposes, there needs to be a lock that
- * prevents that from happening. The slot used here is utterly useless
- * (could have simply been 0 just as fine), but we are doing it
- * nonetheless, in case a smarter idea ever comes up in the future.
- */
- mutex_lock(&priv->mgmt_lock);
+ struct sja1105_port *sp = work_to_port(work);
+ struct sja1105_tagger_data *tagger_data = sp->data;
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ int port = sp - priv->ports;
+ struct sk_buff *skb;
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- clone = DSA_SKB_CB(skb)->clone;
+ while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
- sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
+ mutex_lock(&priv->mgmt_lock);
- if (!clone)
- goto out;
+ sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
- sja1105_ptp_txtstamp_skb(ds, port, clone);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
-out:
- mutex_unlock(&priv->mgmt_lock);
- return NETDEV_TX_OK;
+ mutex_unlock(&priv->mgmt_lock);
+ }
}
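
The rework above replaces DSA's old port_deferred_xmit hook with a per-port kthread worker, so the management-route setup can run in a context that may sleep. A minimal sketch of the kthread_worker pattern being used, with hypothetical demo_* names:

#include <linux/kthread.h>

struct demo_port {
	struct kthread_worker *xmit_worker;
	struct kthread_work xmit_work;
};

/* Runs in process context on the worker thread; sleeping (e.g. taking
 * the SPI bus lock) is allowed here, unlike in the tagger's hot path.
 */
static void demo_deferred_xmit(struct kthread_work *work)
{
	struct demo_port *dp = container_of(work, struct demo_port, xmit_work);

	/* dequeue and transmit skbs, as sja1105_port_deferred_xmit() does */
	(void)dp;
}

static int demo_port_init(struct demo_port *dp)
{
	dp->xmit_worker = kthread_create_worker(0, "demo_xmit");
	if (IS_ERR(dp->xmit_worker))
		return PTR_ERR(dp->xmit_worker);

	kthread_init_work(&dp->xmit_work, demo_deferred_xmit);
	return 0;
}

/* Producer side (may be atomic): kthread_queue_work(dp->xmit_worker, &dp->xmit_work);
 * Teardown: kthread_destroy_worker(dp->xmit_worker);
 */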
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -1990,6 +1994,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
.port_enable = sja1105_port_enable,
+ .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -2003,7 +2008,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
- .port_deferred_xmit = sja1105_port_deferred_xmit,
.port_hwtstamp_get = sja1105_hwtstamp_get,
.port_hwtstamp_set = sja1105_hwtstamp_set,
.port_rxtstamp = sja1105_port_rxtstamp,
@@ -2055,7 +2059,7 @@ static int sja1105_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct sja1105_private *priv;
struct dsa_switch *ds;
- int rc, i;
+ int rc, port;
if (!dev->of_node) {
dev_err(dev, "No DTS bindings for SJA1105 driver\n");
@@ -2120,15 +2124,42 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
/* Connections between dsa_port and sja1105_port */
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- struct sja1105_port *sp = &priv->ports[i];
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *slave;
- dsa_to_port(ds, i)->priv = sp;
- sp->dp = dsa_to_port(ds, i);
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp->priv = sp;
+ sp->dp = dp;
sp->data = tagger_data;
+ slave = dp->slave;
+ kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+ sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
+ slave->name);
+ if (IS_ERR(sp->xmit_worker)) {
+ rc = PTR_ERR(sp->xmit_worker);
+ dev_err(ds->dev,
+ "failed to create deferred xmit thread: %d\n",
+ rc);
+ goto out;
+ }
+ skb_queue_head_init(&sp->xmit_queue);
}
return 0;
+out:
+ while (port-- > 0) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+ return rc;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 43ab7589d0d0..a836fc38c4a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -83,6 +83,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv,
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
int rc;
@@ -101,6 +102,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
kfree_skb(priv->tagger_data.stampable_skb);
priv->tagger_data.stampable_skb = NULL;
}
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
}
@@ -367,22 +370,16 @@ static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
ptp_sts);
}
-#define rxtstamp_to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define tagger_to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
{
- struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
mutex_lock(&ptp_data->lock);
- while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ticks, ts;
int rc;
@@ -404,6 +401,9 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
}
mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
}
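
Returning -1 here follows the do_aux_work contract of struct ptp_clock_info: a negative value means "do not reschedule me", while a value of zero or more is taken as a delay in jiffies until the next invocation. The producer side re-arms the worker with ptp_schedule_worker(), as sja1105_port_rxtstamp() does below. A minimal sketch of both conventions:

static long demo_aux_work(struct ptp_clock_info *ptp)
{
	bool more_pending = false;	/* a real driver would check its queue */

	if (more_pending)
		return msecs_to_jiffies(10);	/* poll again in ~10 ms */

	return -1;	/* idle; ptp_schedule_worker(clock, 0) kicks it again */
}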
/* Called from dsa_skb_defer_rx_timestamp */
@@ -411,16 +411,16 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
* timestamp. For that we need a sleepable context.
*/
- skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
- schedule_work(&tagger_data->rxtstamp_work);
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
return true;
}
@@ -628,11 +628,11 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
+ .do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
};
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -653,8 +653,8 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 470f44b76318..6f4a19eec709 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -30,6 +30,7 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
+ struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 42c1574d45f2..6e21a2a5cf01 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -542,7 +542,8 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
}
static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
 /* The switch internally uses an 8 byte header with length,
* source port, tag, LPA and priority. This is supposedly
@@ -1111,7 +1112,9 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
+#if IS_ENABLED(CONFIG_OF_GPIO)
vsc->gc.of_node = vsc->dev->of_node;
+#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 3da97996bdf3..8cafd06ff0c4 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -196,7 +196,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void el3_tx_timeout (struct net_device *dev);
+static void el3_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void el3_down(struct net_device *dev);
static void el3_up(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
@@ -689,7 +689,7 @@ el3_open(struct net_device *dev)
}
static void
-el3_tx_timeout (struct net_device *dev)
+el3_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b15752267c8d..1e233e2f0a5a 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -371,7 +371,7 @@ static void corkscrew_timer(struct timer_list *t);
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int corkscrew_rx(struct net_device *dev);
-static void corkscrew_timeout(struct net_device *dev);
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t corkscrew_interrupt(int irq, void *dev_id);
static int corkscrew_close(struct net_device *dev);
@@ -961,7 +961,7 @@ static void corkscrew_timer(struct timer_list *t)
#endif /* AUTOMEDIA */
}
-static void corkscrew_timeout(struct net_device *dev)
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct corkscrew_private *vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 3044a6f35f04..ef1c3151fbb2 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -234,7 +234,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev, int worklimit);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -690,7 +690,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 2b2695311bda..d47cde6c5f08 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -173,7 +173,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
@@ -526,7 +526,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8785c2ff3825..a2b7f7ab8170 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -776,7 +776,7 @@ static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
-static void vortex_tx_timeout(struct net_device *dev);
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
@@ -1548,7 +1548,7 @@ vortex_up(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
- int i, mii_reg1, mii_reg5, err = 0;
+ int i, mii_reg5, err = 0;
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
@@ -1605,7 +1605,7 @@ vortex_up(struct net_device *dev)
window_write32(vp, config, 3, Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
- mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
+ mdio_read(dev, vp->phys[0], MII_BMSR);
mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
vp->mii.full_duplex = vp->full_duplex;
@@ -1877,7 +1877,7 @@ leave_media_alone:
iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
-static void vortex_tx_timeout(struct net_device *dev)
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index be823c186517..14fce6658106 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2013,7 +2013,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
}
static void
-typhoon_tx_timeout(struct net_device *dev)
+typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct typhoon *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 78f3e532c600..0e0aa4016858 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -36,9 +36,9 @@ void ei_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(ei_set_multicast_list);
-void ei_tx_timeout(struct net_device *dev)
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(ei_tx_timeout);
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 3e2f2c2e7b58..529c728f334a 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -32,7 +32,7 @@ void NS8390_init(struct net_device *dev, int startp);
int ei_open(struct net_device *dev);
int ei_close(struct net_device *dev);
irqreturn_t ei_interrupt(int irq, void *dev_id);
-void ei_tx_timeout(struct net_device *dev);
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
void ei_set_multicast_list(struct net_device *dev);
struct net_device_stats *ei_get_stats(struct net_device *dev);
@@ -50,7 +50,7 @@ void NS8390p_init(struct net_device *dev, int startp);
int eip_open(struct net_device *dev);
int eip_close(struct net_device *dev);
irqreturn_t eip_interrupt(int irq, void *dev_id);
-void eip_tx_timeout(struct net_device *dev);
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
void eip_set_multicast_list(struct net_device *dev);
struct net_device_stats *eip_get_stats(struct net_device *dev);
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6cf36992a2c6..6834742057b3 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -41,9 +41,9 @@ void eip_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(eip_set_multicast_list);
-void eip_tx_timeout(struct net_device *dev)
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(eip_tx_timeout);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 0b6bbf63f7ca..aeae7966a082 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -83,7 +83,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void axnet_tx_timeout(struct net_device *dev);
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(struct timer_list *t);
static void axnet_reset_8390(struct net_device *dev);
@@ -903,7 +903,7 @@ static int ax_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void axnet_tx_timeout(struct net_device *dev)
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index c9c55c9eab9f..babc92e2692e 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -251,7 +251,7 @@ static int __ei_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void __ei_tx_timeout(struct net_device *dev)
+static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 816540e6beac..165d18405b0c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -576,7 +576,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1105,7 +1105,7 @@ static void check_duplex(struct net_device *dev)
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 174344c450af..cb6a761d5c11 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3651,15 +3651,6 @@ static int et131x_close(struct net_device *netdev)
return del_timer_sync(&adapter->error_timer);
}
-static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
- int cmd)
-{
- if (!netdev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
-}
-
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
@@ -3811,7 +3802,7 @@ drop_err:
 * specified by the 'tx_timeo' element in the net_device structure (see
* et131x_alloc_device() to see how this value is set).
*/
-static void et131x_tx_timeout(struct net_device *netdev)
+static void et131x_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3899,7 +3890,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = et131x_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 80ef3e15bd22..9daef4c8feef 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
sdev->pdev = pdev;
sdev->netdev = dev;
- sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+ sdev->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!sdev->regs) {
dev_err(&pdev->dev, "failed to map registers\n");
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0537df06a9b5..22cadfbeedfb 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -207,19 +207,6 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
-static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -407,7 +394,7 @@ static void emac_init_device(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void emac_timeout(struct net_device *dev)
+static void emac_timeout(struct net_device *dev, unsigned int txqueue)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -791,7 +778,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 46b4207d3266..f366faf88eee 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -437,7 +437,7 @@ static const struct ethtool_ops ace_ethtool_ops = {
.set_link_ksettings = ace_set_link_ksettings,
};
-static void ace_watchdog(struct net_device *dev);
+static void ace_watchdog(struct net_device *dev, unsigned int txqueue);
static const struct net_device_ops ace_netdev_ops = {
.ndo_open = ace_open,
@@ -1542,7 +1542,7 @@ static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
}
-static void ace_watchdog(struct net_device *data)
+static void ace_watchdog(struct net_device *data, unsigned int txqueue)
{
struct net_device *dev = data;
struct ace_private *ap = netdev_priv(dev);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 4cd53fc338b5..1671c1f36691 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fc96c66b44cb..b4e891d49a94 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -740,7 +740,9 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES)
+ if (count < ENA_MIN_NUM_IO_QUEUES ||
+ (ena_xdp_present(adapter) &&
+ !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
return -EINVAL;
return ena_update_queue_count(adapter, count);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 948583fdcc28..894e8c1a8cf1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -36,7 +36,6 @@
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
@@ -47,6 +46,7 @@
#include <net/ip.h>
#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
@@ -78,7 +78,37 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_tx_timeout(struct net_device *dev)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count);
+static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
+static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
+static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
+static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static int ena_up(struct ena_adapter *adapter);
+static void ena_down(struct ena_adapter *adapter);
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+
+static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -123,6 +153,448 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+static int ena_xmit_common(struct net_device *dev,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int rc, nb_hw_desc;
+
+ if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
+ ena_tx_ctx))) {
+ netif_dbg(adapter, tx_queued, dev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ ring->qid);
+ ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+ }
+
+ /* prepare the packet's descriptors to dma engine */
+ rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
+ &nb_hw_desc);
+
+ /* In case there isn't enough space in the queue for the packet,
+ * we simply drop it. All other failure reasons of
+ * ena_com_prepare_tx() are fatal and therefore require a device reset.
+ */
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "failed to prepare tx bufs\n");
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.prepare_ctx_err++;
+ u64_stats_update_end(&ring->syncp);
+ if (rc != -ENOMEM) {
+ adapter->reset_reason =
+ ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ return rc;
+ }
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.cnt++;
+ ring->tx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->syncp);
+
+ tx_info->tx_descs = nb_hw_desc;
+ tx_info->last_jiffies = jiffies;
+ tx_info->print_once = 0;
+
+ ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ ring->ring_size);
+ return 0;
+}
+
+/* This is the XDP napi callback. XDP queues use a separate napi callback
+ * from the one used by Rx/Tx queues.
+ */
+static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ u32 xdp_work_done, xdp_budget;
+ struct ena_ring *xdp_ring;
+ int napi_comp_call = 0;
+ int ret;
+
+ xdp_ring = ena_napi->xdp_ring;
+ xdp_ring->first_interrupt = ena_napi->first_interrupt;
+
+ xdp_budget = budget;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+
+ /* If the device is about to reset or is down, avoid unmasking
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (xdp_budget > xdp_work_done) {
+ napi_comp_call = 1;
+ if (napi_complete_done(napi, xdp_work_done))
+ ena_unmask_interrupt(xdp_ring, NULL);
+ ena_update_ring_numa_node(xdp_ring, NULL);
+ ret = xdp_work_done;
+ } else {
+ ret = xdp_budget;
+ }
+
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.napi_comp += napi_comp_call;
+ xdp_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return ret;
+}
+
+static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_buff *xdp,
+ void **push_hdr,
+ u32 *push_len)
+{
+ struct ena_adapter *adapter = xdp_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ dma_addr_t dma = 0;
+ u32 size;
+
+ tx_info->xdpf = convert_to_xdp_frame(xdp);
+ size = tx_info->xdpf->len;
+ ena_buf = tx_info->bufs;
+
+ /* llq push buffer */
+ *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+ *push_hdr = tx_info->xdpf->data;
+
+ if (size - *push_len > 0) {
+ dma = dma_map_single(xdp_ring->dev,
+ *push_hdr + *push_len,
+ size - *push_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 1;
+ tx_info->num_of_bufs = 1;
+ }
+
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&xdp_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+
+ xdp_return_frame_rx_napi(tx_info->xdpf);
+ tx_info->xdpf = NULL;
+ tx_info->num_of_bufs = 0;
+
+ return -EINVAL;
+}
+
+static int ena_xdp_xmit_buff(struct net_device *dev,
+ struct xdp_buff *xdp,
+ int qid,
+ struct ena_rx_buffer *rx_info)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_com_tx_ctx ena_tx_ctx = {0};
+ struct ena_tx_buffer *tx_info;
+ struct ena_ring *xdp_ring;
+ u16 next_to_use, req_id;
+ int rc;
+ void *push_hdr;
+ u32 push_len;
+
+ xdp_ring = &adapter->tx_ring[qid];
+ next_to_use = xdp_ring->next_to_use;
+ req_id = xdp_ring->free_ids[next_to_use];
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+ page_ref_inc(rx_info->page);
+ tx_info->xdp_rx_page = rx_info->page;
+
+ rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = push_len;
+
+ rc = ena_xmit_common(dev,
+ xdp_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdp->data_end - xdp->data);
+ if (rc)
+ goto error_unmap_dma;
+ /* trigger the dma engine. ena_com_write_sq_doorbell()
+ * includes a memory barrier.
+ */
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.doorbells++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return NETDEV_TX_OK;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+ tx_info->xdpf = NULL;
+error_drop_packet:
+
+ return NETDEV_TX_OK;
+}
+
+static int ena_xdp_execute(struct ena_ring *rx_ring,
+ struct xdp_buff *xdp,
+ struct ena_rx_buffer *rx_info)
+{
+ struct bpf_prog *xdp_prog;
+ u32 verdict = XDP_PASS;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ if (!xdp_prog)
+ goto out;
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (verdict == XDP_TX)
+ ena_xdp_xmit_buff(rx_ring->netdev,
+ xdp,
+ rx_ring->qid + rx_ring->adapter->num_io_queues,
+ rx_info);
+ else if (unlikely(verdict == XDP_ABORTED))
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ else if (unlikely(verdict > XDP_TX))
+ bpf_warn_invalid_xdp_action(verdict);
+out:
+ rcu_read_unlock();
+ return verdict;
+}
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources(adapter);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both kernel and bpf-prog to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
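
The registered xdp_rxq_info only becomes visible to a BPF program once the rx path links it into each xdp_buff it builds; that hookup sits outside this hunk, but a minimal sketch of the usual wiring looks like the following (field names per struct xdp_buff, the demo function itself hypothetical):

static void demo_fill_xdp_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp,
			       void *data, unsigned int len)
{
	xdp->data_hard_start = data - rx_ring->rx_headroom;
	xdp->data = data;
	xdp->data_meta = data;		/* no metadata pushed yet */
	xdp->data_end = data + len;
	xdp->rxq = &rx_ring->xdp_rxq;	/* lets the prog see the rx queue */
}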
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first,
+ int count)
+{
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ xchg(&rx_ring->xdp_bpf_prog, prog);
+ if (prog) {
+ ena_xdp_register_rxq_info(rx_ring);
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ ena_xdp_unregister_rxq_info(rx_ring);
+ rx_ring->rx_headroom = 0;
+ }
+ }
+}
+
+void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ } else if (old_bpf_prog) {
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "xdp program set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ case XDP_QUERY_PROG:
+ bpf->prog_id = adapter->xdp_bpf_prog ?
+ adapter->xdp_bpf_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
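
ena_xdp() is the driver's netdev BPF entry point. The hookup is not visible in this hunk, but it would normally be installed through the standard .ndo_bpf callback in the driver's net_device_ops:

static const struct net_device_ops demo_netdev_ops = {
	/* ... existing callbacks ... */
	.ndo_bpf	= ena_xdp,	/* handles XDP_SETUP_PROG / XDP_QUERY_PROG */
};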
+
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
@@ -164,7 +636,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -172,13 +645,12 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
- /* TX/RX common ring state */
+ /* TX common ring state */
ena_init_io_rings_common(adapter, txr, i);
- ena_init_io_rings_common(adapter, rxr, i);
/* TX specific ring state */
txr->ring_size = adapter->requested_tx_ring_size;
@@ -188,14 +660,20 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
- /* RX specific ring state */
- rxr->ring_size = adapter->requested_rx_ring_size;
- rxr->rx_copybreak = adapter->rx_copybreak;
- rxr->sgl_size = adapter->max_rx_sgl_size;
- rxr->smoothed_interval =
- ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
- rxr->empty_rx_queue = 0;
- adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ /* Don't init RX queues for xdp queues */
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* RX common ring state */
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->requested_rx_ring_size;
+ rxr->rx_copybreak = adapter->rx_copybreak;
+ rxr->sgl_size = adapter->max_rx_sgl_size;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ rxr->empty_rx_queue = 0;
+ adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
}
}
@@ -285,16 +763,13 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
- * @adapter: private structure
- *
- * Return 0 on success, negative on failure
- */
-static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -308,11 +783,20 @@ err_setup_tx:
"Tx queue %d: allocation failed\n", i);
/* rewind the index freeing the rings as we go */
- while (i--)
+ while (first_index < i--)
ena_free_tx_resources(adapter, i);
return rc;
}
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
+{
+ int i;
+
+ for (i = first_index; i < first_index + count; i++)
+ ena_free_tx_resources(adapter, i);
+}
+
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
* @adapter: board private structure
*
@@ -320,10 +804,10 @@ err_setup_tx:
*/
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_io_queues; i++)
- ena_free_tx_resources(adapter, i);
+ ena_free_all_io_tx_resources_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
@@ -495,8 +979,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_info->page = page;
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
- ena_buf->paddr = dma;
- ena_buf->len = ENA_PAGE_SIZE;
+ ena_buf->paddr = dma + rx_ring->rx_headroom;
+ ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
return 0;
}
@@ -513,7 +997,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev,
+ ena_buf->paddr - rx_ring->rx_headroom,
+ ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@@ -620,8 +1106,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -675,7 +1161,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -688,7 +1174,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +1185,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -723,6 +1209,32 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
+static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
+{
+ if (tx_info)
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "tx_info doesn't have valid %s",
+ is_xdp ? "xdp frame" : "skb");
+ else
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "Invalid req_id: %hu\n",
+ req_id);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.bad_req_id++;
+ u64_stats_update_end(&ring->syncp);
+
+ /* Trigger device reset */
+ ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
+ return -EFAULT;
+}
+
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
struct ena_tx_buffer *tx_info = NULL;
@@ -733,21 +1245,20 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return 0;
}
- if (tx_info)
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "tx_info doesn't have valid skb\n");
- else
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "Invalid req_id: %hu\n", req_id);
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
+}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.bad_req_id++;
- u64_stats_update_end(&tx_ring->syncp);
+static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
- /* Trigger device reset */
- tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
- return -EFAULT;
+ if (likely(req_id < xdp_ring->ring_size)) {
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+ }
+
+ return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
@@ -786,7 +1297,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1037,6 +1548,33 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ struct ena_rx_buffer *rx_info;
+ int ret;
+
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ xdp->data = page_address(rx_info->page) +
+ rx_info->page_offset + rx_ring->rx_headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_hard_start = page_address(rx_info->page);
+ xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ /* If for some reason we received a bigger packet than
+ * we expect, then we simply drop it
+ */
+ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+ return XDP_DROP;
+
+ ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+
+ /* The xdp program might expand the headers */
+ if (ret == XDP_PASS) {
+ rx_info->page_offset = xdp->data - xdp->data_hard_start;
+ rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ }
+
+ return ret;
+}
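An illustrative view of the buffer set up above (not part of the patch):

/*
 * Single-descriptor packet as seen by the XDP program:
 *
 *   page_address(rx_info->page)
 *   |<- page_offset ->|<- rx_headroom ->|<--- ena_bufs[0].len --->|
 *   ^ data_hard_start                   ^ data                    ^ data_end
 *
 * On XDP_PASS the program may have moved data/data_end (header push/pop),
 * which is why page_offset and the buffer length are recomputed from the
 * adjusted pointers before the skb is built.
 */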
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1048,23 +1586,27 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
u32 budget)
{
u16 next_to_clean = rx_ring->next_to_clean;
- u32 res_budget, work_done;
-
struct ena_com_rx_ctx ena_rx_ctx;
struct ena_adapter *adapter;
+ u32 res_budget, work_done;
+ int rx_copybreak_pkt = 0;
+ int refill_threshold;
struct sk_buff *skb;
int refill_required;
- int refill_threshold;
- int rc = 0;
+ struct xdp_buff xdp;
int total_len = 0;
- int rx_copybreak_pkt = 0;
+ int xdp_verdict;
+ int rc = 0;
int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
+ xdp.rxq = &rx_ring->xdp_rxq;
do {
+ xdp_verdict = XDP_PASS;
+ skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.descs = 0;
@@ -1082,12 +1624,22 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ if (ena_xdp_present_ring(rx_ring))
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+
/* allocate skb and fill it */
- skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
- &next_to_clean);
+ if (xdp_verdict == XDP_PASS)
+ skb = ena_rx_skb(rx_ring,
+ rx_ring->ena_bufs,
+ ena_rx_ctx.descs,
+ &next_to_clean);
- /* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
+ if (xdp_verdict == XDP_TX) {
+ ena_free_rx_page(rx_ring,
+ &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
+ res_budget--;
+ }
for (i = 0; i < ena_rx_ctx.descs; i++) {
rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
@@ -1095,6 +1647,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
+ if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
+ continue;
break;
}
@@ -1188,9 +1742,14 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
- rx_ring->smoothed_interval :
- ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
+ u32 rx_interval = 0;
+ /* Rx ring can be NULL for XDP tx queues, which don't have an
+ * accompanying rx_ring pair.
+ */
+ if (rx_ring)
+ rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
+ rx_ring->smoothed_interval :
+ ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
/* Update intr register: rx intr delay,
* tx intr delay and interrupt unmask
@@ -1203,8 +1762,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
+ * The Tx ring is used because the rx_ring is NULL for XDP queues.
*/
- ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+ ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
@@ -1222,22 +1782,82 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
+ if (rx_ring)
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq,
+ numa_node);
}
tx_ring->cpu = cpu;
- rx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
return;
out:
put_cpu();
}
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ u32 tx_bytes = 0;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!xdp_ring))
+ return 0;
+ next_to_clean = xdp_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+ &req_id);
+ if (rc)
+ break;
+
+ rc = validate_xdp_req_id(xdp_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ xdpf = tx_info->xdpf;
+
+ tx_info->xdpf = NULL;
+ tx_info->last_jiffies = 0;
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+ xdpf);
+
+ tx_bytes += xdpf->len;
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+
+ __free_page(tx_info->xdp_rx_page);
+ xdp_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ xdp_ring->ring_size);
+ }
+
+ xdp_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ xdp_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
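A note on the __free_page() in the completion loop above, summarizing behavior the patch already implies:

/*
 * For XDP_TX the original RX page is handed over to the XDP TX ring at
 * transmit time and recorded in tx_info->xdp_rx_page; it can only be
 * returned to the page allocator here, after the device reports the TX
 * completion for the corresponding req_id.
 */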
+
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
-
int tx_work_done;
int rx_work_done = 0;
int tx_budget;
@@ -1247,6 +1867,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring = ena_napi->tx_ring;
rx_ring = ena_napi->rx_ring;
+ tx_ring->first_interrupt = ena_napi->first_interrupt;
+ rx_ring->first_interrupt = ena_napi->first_interrupt;
+
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -1322,8 +1945,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- ena_napi->tx_ring->first_interrupt = true;
- ena_napi->rx_ring->first_interrupt = true;
+ ena_napi->first_interrupt = true;
napi_schedule_irqoff(&ena_napi->napi);
@@ -1398,10 +2020,12 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
{
struct net_device *netdev;
int irq_idx, i, cpu;
+ int io_queue_count;
netdev = adapter->netdev;
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < io_queue_count; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1529,45 +2153,64 @@ static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
synchronize_irq(adapter->irq_tbl[i].vector);
}
-static void ena_del_napi(struct ena_adapter *adapter)
+static void ena_del_napi_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
- netif_napi_del(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ /* Check if napi was initialized before */
+ if (!ENA_IS_XDP_INDEX(adapter, i) ||
+ adapter->ena_napi[i].xdp_ring)
+ netif_napi_del(&adapter->ena_napi[i].napi);
+ else
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].xdp_ring);
+ }
}
-static void ena_init_napi(struct ena_adapter *adapter)
+static void ena_init_napi_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
- struct ena_napi *napi;
+ struct ena_napi *napi = NULL;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
&adapter->ena_napi[i].napi,
- ena_io_poll,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
ENA_NAPI_BUDGET);
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
+
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ napi->rx_ring = &adapter->rx_ring[i];
+ napi->tx_ring = &adapter->tx_ring[i];
+ } else {
+ napi->xdp_ring = &adapter->tx_ring[i];
+ }
napi->qid = i;
}
}
-static void ena_napi_disable_all(struct ena_adapter *adapter)
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
-static void ena_napi_enable_all(struct ena_adapter *adapter)
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1582,7 +2225,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -1620,7 +2263,9 @@ static int ena_up_complete(struct ena_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
- ena_napi_enable_all(adapter);
+ ena_napi_enable_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues + adapter->num_io_queues);
return 0;
}
@@ -1653,7 +2298,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
if (rc) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n",
- qid, rc);
+ qid, rc);
return rc;
}
@@ -1672,12 +2317,13 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1686,7 +2332,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
return 0;
create_err:
- while (i--)
+ while (i-- > first_index)
ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
return rc;
@@ -1731,13 +2377,15 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
netif_err(adapter, ifup, adapter->netdev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- return rc;
+ goto err;
}
ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
return rc;
+err:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
@@ -1764,7 +2412,8 @@ create_err:
}
static void set_io_rings_size(struct ena_adapter *adapter,
- int new_tx_size, int new_rx_size)
+ int new_tx_size,
+ int new_rx_size)
{
int i;
@@ -1798,14 +2447,24 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
* ones due to past queue allocation failures.
*/
set_io_rings_size(adapter, adapter->requested_tx_ring_size,
- adapter->requested_rx_ring_size);
+ adapter->requested_rx_ring_size);
while (1) {
- rc = ena_setup_all_tx_resources(adapter);
+ if (ena_xdp_present(adapter)) {
+ rc = ena_setup_and_create_all_xdp_queues(adapter);
+
+ if (rc)
+ goto err_setup_tx;
+ }
+ rc = ena_setup_tx_resources_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_setup_tx;
- rc = ena_create_all_io_tx_queues(adapter);
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_create_tx_queues;
@@ -1829,7 +2488,7 @@ err_setup_tx:
if (rc != -ENOMEM) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with error code %d\n",
- rc);
+ rc);
return rc;
}
@@ -1852,7 +2511,7 @@ err_setup_tx:
new_rx_ring_size = cur_rx_ring_size / 2;
if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
- new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
ENA_MIN_RING_SIZE);
@@ -1871,10 +2530,11 @@ err_setup_tx:
static int ena_up(struct ena_adapter *adapter)
{
- int rc, i;
+ int io_queue_count, rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
/* napi poll functions should be initialized before running
@@ -1882,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
* interrupt, causing the ISR to fire immediately while the poll
* function wasn't set yet, causing a null dereference
*/
- ena_init_napi(adapter);
+ ena_init_napi_in_range(adapter, 0, io_queue_count);
rc = ena_request_io_irq(adapter);
if (rc)
@@ -1913,7 +2573,7 @@ static int ena_up(struct ena_adapter *adapter)
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = 0; i < io_queue_count; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1926,13 +2586,15 @@ err_up:
err_create_queues_with_backoff:
ena_free_io_irq(adapter);
err_req_irq:
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
+ int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+
netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -1945,7 +2607,7 @@ static void ena_down(struct ena_adapter *adapter)
netif_tx_disable(adapter->netdev);
/* After this point the napi handler won't enable the tx queue */
- ena_napi_disable_all(adapter);
+ ena_napi_disable_in_range(adapter, 0, io_queue_count);
/* After destroy the queue there won't be any new interrupts */
@@ -1963,7 +2625,7 @@ static void ena_down(struct ena_adapter *adapter)
ena_disable_io_intr_sync(adapter);
ena_free_io_irq(adapter);
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
ena_free_all_tx_bufs(adapter);
ena_free_all_rx_bufs(adapter);
@@ -2053,23 +2715,47 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_up(adapter) : 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int prev_channel_count;
bool dev_was_up;
dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
+ prev_channel_count = adapter->num_io_queues;
adapter->num_io_queues = new_channel_count;
+ if (ena_xdp_present(adapter) &&
+ ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
+ adapter->xdp_first_ring = new_channel_count;
+ adapter->xdp_num_queues = new_channel_count;
+ if (prev_channel_count > new_channel_count)
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ NULL,
+ new_channel_count,
+ prev_channel_count);
+ else
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ adapter->xdp_bpf_prog,
+ prev_channel_count,
+ new_channel_count);
+ }
+
/* We need to destroy the rss table so that the indirection
* table will be reinitialized by ena_up()
*/
ena_com_rss_destroy(ena_dev);
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_open(adapter->netdev) : 0;
}
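A worked example of the range exchange above, with illustrative counts:

/*
 * With an XDP program attached:
 *   8 -> 4 channels: rings 4..7 receive a NULL program, so their
 *                    rx_headroom and rxq_info are torn down first;
 *   4 -> 8 channels: the current program is installed on the new rings
 *                    4..7 before ena_open() brings them up.
 */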
@@ -2253,7 +2939,7 @@ error_report_dma_error:
tx_info->skb = NULL;
tx_info->num_of_bufs += i;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
return -EINVAL;
}
@@ -2268,7 +2954,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
void *push_hdr;
u16 next_to_use, req_id, header_len;
- int qid, rc, nb_hw_desc;
+ int qid, rc;
netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
/* Determine which tx ring we will be placed on */
@@ -2303,50 +2989,17 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb);
- if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
- "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
- qid);
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- }
-
- /* prepare the packet's descriptors to dma engine */
- rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
- &nb_hw_desc);
-
- /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
- * since the number of free descriptors in the queue is checked
- * after sending the previous packet. In case there isn't enough
- * space in the queue for the next packet, it is stopped
- * until there is again enough available space in the queue.
- * All other failure reasons of ena_com_prepare_tx() are fatal
- * and therefore require a device reset.
- */
- if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.prepare_ctx_err++;
- u64_stats_update_end(&tx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ rc = ena_xmit_common(dev,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ skb->len);
+ if (rc)
goto error_unmap_dma;
- }
netdev_tx_sent_queue(txq, skb->len);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.cnt++;
- tx_ring->tx_stats.bytes += skb->len;
- u64_stats_update_end(&tx_ring->syncp);
-
- tx_info->tx_descs = nb_hw_desc;
- tx_info->last_jiffies = jiffies;
- tx_info->print_once = 0;
-
- tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
- tx_ring->ring_size);
-
/* stop the queue when no more space available, the packet can have up
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
@@ -2393,7 +3046,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
error_unmap_dma:
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
tx_info->skb = NULL;
error_drop_packet:
@@ -2572,6 +3225,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_bpf = ena_xdp,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2951,7 +3605,9 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
int i, budget, rc;
+ int io_queue_count;
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -2966,7 +3622,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2974,7 +3630,8 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (unlikely(rc))
return;
- rc = check_for_rx_interrupt_queue(adapter, rx_ring);
+ rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
@@ -2983,7 +3640,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
+ adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -3560,6 +4217,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3573,7 +4233,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Failed to query interrupt moderation feature\n");
goto err_netdev_destroy;
}
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
netdev->netdev_ops = &ena_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index bffd778f2ce3..094324fd0edc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -36,6 +36,7 @@
#include <linux/bitops.h>
#include <linux/dim.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -142,6 +143,18 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+/* The max MTU size is configured to be the ethernet frame size without
+ * the overhead of the ethernet header (which can include a VLAN header),
+ * the frame check sequence (FCS) and the XDP packet headroom.
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE.
+ */
+
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM)
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
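An example of the ring indexing the macro above implies (illustrative values):

/*
 * With num_io_queues == 4 and an XDP program attached, xdp_first_ring == 4
 * and xdp_num_queues == 4: tx_ring[0..3] back the regular channels,
 * tx_ring[4..7] are XDP-only TX rings, and ENA_IS_XDP_INDEX(adapter, 5)
 * evaluates to true.
 */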
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -155,6 +168,8 @@ struct ena_napi {
struct napi_struct napi ____cacheline_aligned;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
+ struct ena_ring *xdp_ring;
+ bool first_interrupt;
u32 qid;
struct dim dim;
};
@@ -180,6 +195,17 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* XDP frame structure which is used for sending packets on
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ /* The rx page for the rx buffer that was received and then
+ * retransmitted on the xdp tx queues as a result of an XDP_TX action.
+ * We need to free the page once we finish cleaning the buffer in
+ * ena_clean_xdp_irq()
+ */
+ struct page *xdp_rx_page;
+
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
@@ -258,10 +284,13 @@ struct ena_ring {
struct ena_adapter *adapter;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
+ struct bpf_prog *xdp_bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
u16 next_to_use;
u16 next_to_clean;
u16 rx_copybreak;
+ u16 rx_headroom;
u16 qid;
u16 mtu;
u16 sgl_size;
@@ -379,6 +408,10 @@ struct ena_adapter {
u32 last_monitored_tx_qid;
enum ena_regs_reset_reason_types reset_reason;
+
+ struct bpf_prog *xdp_bpf_prog;
+ u32 xdp_first_ring;
+ u32 xdp_num_queues;
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -390,8 +423,48 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
+{
+ return adapter->xdp_first_ring != 0;
+}
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
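A short sketch of the channel budget rule checked above, with illustrative numbers:

/*
 * Example: max_num_io_queues == 8 permits XDP only while
 * num_io_queues <= 4, since attaching a program pairs every channel
 * with a second, XDP-only TX ring (2 * queues <= max_num_io_queues).
 */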
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ab30761003da..cf3562e82ca9 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -527,7 +527,7 @@ int lance_close(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(lance_close);
-void lance_tx_timeout(struct net_device *dev)
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
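Many of the hunks that follow repeat one mechanical change: ndo_tx_timeout gains a txqueue argument identifying the stalled TX queue. A hedged sketch of what the new argument enables (example_tx_timeout is hypothetical, not part of the patch):

static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	/* Recover just the stalled ring instead of resetting the device */
	netdev_warn(dev, "TX queue %u stuck, last trans: %lu\n",
		    txqueue, txq->trans_start);
}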
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 741cdc392c6b..8266b3c1fefc 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -243,7 +243,7 @@ int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
-void lance_tx_timeout(struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
#endif
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 212fe72a190b..2f808dbc8b0e 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -118,10 +118,6 @@ struct lance_private {
int auto_select; /* cable-selection by carrier */
unsigned short busmaster_regval;
-#ifdef CONFIG_SUNLANCE
- struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
- int burst_sizes; /* ledma SBus burst sizes */
-#endif
struct timer_list multicast_timer;
struct net_device *dev;
};
@@ -522,7 +518,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -551,11 +547,10 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
if (!lance_tx_buffs_avail(lp))
goto out_free;
-#ifdef DEBUG
/* dump the packet */
- print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, 64, true);
-#endif
+ print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+ 64, true);
+
entry = lp->tx_new & lp->tx_ring_mod_mask;
ib->btx_ring[entry].length = (-skblen) | 0xf000;
ib->btx_ring[entry].misc = 0;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 0842da492a64..1c53408f5d47 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -422,7 +422,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
spin_unlock_irqrestore(&priv->chip_lock, flags);
}
-static void am79c961_timeout(struct net_device *dev)
+static void am79c961_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 573e88fc8ede..0f3b743425e8 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1569,7 +1569,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
* failed or the interface is locked up. This function will reinitialize
* the hardware.
*/
-static void amd8111e_tx_timeout(struct net_device *dev)
+static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4b6a5cb85dd2..5e0f645f5bde 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -530,7 +530,7 @@ static inline void ariadne_reset(struct net_device *dev)
netif_start_queue(dev);
}
-static void ariadne_tx_timeout(struct net_device *dev)
+static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d3d44e07afbc..4e36122609a3 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -346,7 +346,7 @@ static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
/************************* End of Prototypes **************************/
@@ -727,7 +727,7 @@ static void lance_init_ring( struct net_device *dev )
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 1793950f0582..089a4fbc61a0 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1014,7 +1014,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
*/
-static void au1000_tx_timeout(struct net_device *dev)
+static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
@@ -1053,23 +1053,12 @@ static void au1000_multicast_list(struct net_device *dev)
writel(reg, &aup->mac->control);
}
-static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL; /* PHY not controllable */
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops au1000_netdev_ops = {
.ndo_open = au1000_open,
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
.ndo_set_rx_mode = au1000_multicast_list,
- .ndo_do_ioctl = au1000_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1161,7 +1150,7 @@ static int au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (struct mac_reg *)
- ioremap_nocache(base->start, resource_size(base));
+ ioremap(base->start, resource_size(base));
if (!aup->mac) {
dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
@@ -1169,7 +1158,7 @@ static int au1000_probe(struct platform_device *pdev)
}
/* Setup some variables for quick register address access */
- aup->enable = (u32 *)ioremap_nocache(macen->start,
+ aup->enable = (u32 *)ioremap(macen->start,
resource_size(macen));
if (!aup->enable) {
dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
@@ -1178,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+ aup->macdma = ioremap(macdma->start, resource_size(macdma));
if (!aup->macdma) {
dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
err = -ENXIO;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index dac4a2fcad6a..7282ce55ffb8 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -608,7 +608,7 @@ static int lance_rx(struct net_device *dev)
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == 0) {
+ if (!skb) {
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
@@ -884,7 +884,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index f90b454b1642..aff44241988c 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -306,7 +306,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
@@ -913,7 +913,7 @@ lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
}
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = (struct lance_private *) dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index c6c2a54c1121..c38edf6f03a3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -254,7 +254,7 @@ static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
-static void ni65_timeout(struct net_device *dev);
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
@@ -1133,7 +1133,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
* kick xmitter ..
*/
-static void ni65_timeout(struct net_device *dev)
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct priv *p = dev->ml_priv;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9c152d85840d..023aecf6ab30 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -407,7 +407,7 @@ static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static struct net_device_stats *mace_get_stats(struct net_device *dev);
static int mace_rx(struct net_device *dev, unsigned char RxCnt);
@@ -837,7 +837,7 @@ mace_start_xmit
failed, put skb back into a list."
---------------------------------------------------------------------------- */
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5ad12c10934..dc7d88227e76 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -314,7 +314,7 @@ static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
struct net_device *);
-static void pcnet32_tx_timeout(struct net_device *dev);
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
@@ -2455,7 +2455,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
-static void pcnet32_tx_timeout(struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr, flags;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ebcbf8ca4829..b00e00881253 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1097,7 +1097,7 @@ static void lance_piozero(void __iomem *dest, int len)
sbus_writeb(0, piobuf);
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 98f8f2033154..b71f9b04a51e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2152,7 +2152,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static void xgbe_tx_timeout(struct net_device *netdev)
+static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 128cd648ba99..46c3c1ca38d6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1227,7 +1227,7 @@ static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
for (cc = 0; len; buf++, len--)
cc += *buf;
- return (cc == cc_in) ? true : false;
+ return cc == cc_in;
}
static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 02b4f3af02b5..c48f60996761 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -575,7 +575,7 @@ static void xge_free_pending_skb(struct net_device *ndev)
}
}
-static void xge_timeout(struct net_device *ndev)
+static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xge_pdata *pdata = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d8612131c55e..6aee2f0fc0db 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -859,7 +859,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
return processed;
}
-static void xgene_enet_timeout(struct net_device *ndev)
+static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct netdev_queue *txq;
@@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
if (!ndev)
return -ENOMEM;
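For clarity on the swap above:

/* alloc_etherdev_mqs(sizeof_priv, txqs, rxqs) takes the TX queue count
 * before the RX queue count; the change fixes reversed arguments rather
 * than altering the ring counts themselves.
 */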
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8d03578d5e8c..95d3061c61be 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -91,7 +91,7 @@ static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
@@ -600,7 +600,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 2bb329606794..6b27af0db499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_free_aq_hw;
}
- self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
goto err_free_aq_hw;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 6f2c867785fe..17bda4e8cc45 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -781,18 +781,6 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
return 0;
}
-static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-
/**
* arc_emac_restart - Restart EMAC
* @ndev: Pointer to net_device structure.
@@ -857,7 +845,7 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
.ndo_set_rx_mode = arc_emac_set_rx_mode,
- .ndo_do_ioctl = arc_emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = arc_emac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 61a334d1b5e6..e95687a780fb 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1394,14 +1394,6 @@ err_drop:
return NETDEV_TX_OK;
}
-static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
struct ag71xx *ag = from_timer(ag, t, oom_timer);
@@ -1409,7 +1401,7 @@ static void ag71xx_oom_timer_handler(struct timer_list *t)
napi_schedule(&ag->napi);
}
-static void ag71xx_tx_timeout(struct net_device *ndev)
+static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ag71xx *ag = netdev_priv(ndev);
@@ -1618,7 +1610,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_do_ioctl = ag71xx_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1687,8 +1679,7 @@ static int ag71xx_probe(struct platform_device *pdev)
goto err_free;
}
- ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+ ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d4bbcdfd691a..1dcbc486eca9 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1553,7 +1553,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}
-static void alx_tx_timeout(struct net_device *dev)
+static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct alx_priv *alx = netdev_priv(dev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2b239ecea05f..4c0b1f8551dd 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -350,7 +350,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1c_tx_timeout(struct net_device *netdev)
+static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7b65825c15..e0d89942d537 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -251,7 +251,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1e_tx_timeout(struct net_device *netdev)
+static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 3aba38322717..b81a4e0c5b57 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1001,7 +1001,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl2_tx_timeout(struct net_device *netdev)
+static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 505a22c703f7..0941d07d0833 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -183,7 +183,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atlx_tx_timeout(struct net_device *netdev)
+static void atlx_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 30b455013bf3..bc273e0db7ff 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1005,18 +1005,13 @@ static int nb8800_stop(struct net_device *dev)
return 0;
}
-static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops nb8800_netdev_ops = {
.ndo_open = nb8800_open,
.ndo_stop = nb8800_stop,
.ndo_start_xmit = nb8800_xmit,
.ndo_set_mac_address = nb8800_set_mac_address,
.ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = nb8800_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index ec25fd81985d..a780b7215021 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -948,7 +948,7 @@ irq_ack:
return IRQ_RETVAL(handled);
}
-static void b44_tx_timeout(struct net_device *dev)
+static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct b44 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 825af709708e..f07ac0e0af59 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1354,7 +1354,7 @@ out:
return ret;
}
-static void bcm_sysport_tx_timeout(struct net_device *dev)
+static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_warn(dev, "transmit timeout!\n");
@@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
- priv->ring_map[q + port * num_tx_queues] = ring;
+ priv->ring_map[qp + port * num_tx_queues] = ring;
qp++;
}
@@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
struct net_device *slave_dev;
unsigned int num_tx_queues;
struct net_device *dev;
- unsigned int q, port;
+ unsigned int q, qp, port;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
if (priv->netdev != info->master)
@@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
continue;
ring->inspect = false;
- priv->ring_map[q + port * num_tx_queues] = NULL;
+ qp = ring->switch_queue;
+ priv->ring_map[qp + port * num_tx_queues] = NULL;
}
return 0;
@@ -2427,6 +2428,14 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!of_id || !of_id->data)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
/* Fairly quickly we need to know the type of adapter we have */
params = of_id->data;
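
The new probe code above uses the try-then-fall-back DMA mask idiom: request 40-bit addressing first and settle for 32-bit if the platform cannot do it. For reference, dma_set_mask_and_coherent() is essentially this inline helper (sketch of include/linux/dma-mapping.h):

static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);	/* streaming mask */

	if (rc == 0)
		dma_set_coherent_mask(dev, mask); /* coherent mask too */
	return rc;
}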
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 148734b166f0..1bb07a5d82c9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1248,14 +1248,6 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
-static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(net_dev))
- return -EINVAL;
-
- return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
-}
-
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
@@ -1263,7 +1255,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = bgmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
};
/**************************************************
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index fbc196b480b6..dbb7874607ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6575,7 +6575,7 @@ bnx2_dump_state(struct bnx2 *bp)
}
static void
-bnx2_tx_timeout(struct net_device *dev)
+bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5e037a305b83..ee9e9290f112 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4970,7 +4970,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-void bnx2x_tx_timeout(struct net_device *dev)
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3f63ffd7561b..6f1352d51cb2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -617,7 +617,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*
* @dev: net device
*/
-void bnx2x_tx_timeout(struct net_device *dev);
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);
/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
* c2s_map should have BNX2X_MAX_PRIORITY entries.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index cff64e43bdd8..1c26fa962233 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14053,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = -ENOMEM;
goto init_one_freemem;
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ bp->doorbells = ioremap(pci_resource_start(pdev, 2),
doorbell_size);
}
if (!bp->doorbells) {
@@ -15410,6 +15410,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c779f9cf8822..483935b001c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -944,6 +944,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
+ page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -6997,7 +6998,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
@@ -7288,6 +7288,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
+ bp->chip_rev = resp->chip_rev;
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
@@ -9063,7 +9064,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shut down the link, so we need to call
* PHY_CFG to bring it back up.
*/
- if (!netif_carrier_ok(bp->dev))
+ if (!bp->link_info.link_up)
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -9975,7 +9976,7 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
}
}
-static void bnxt_tx_timeout(struct net_device *dev)
+static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnxt *bp = netdev_priv(dev);
@@ -10040,6 +10041,13 @@ static void bnxt_timer(struct timer_list *t)
bnxt_queue_sp_work(bp);
}
+#ifdef CONFIG_RFS_ACCEL
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+#endif /*CONFIG_RFS_ACCEL*/
+
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
bp->link_info.phy_retry = false;
@@ -10050,7 +10058,8 @@ static void bnxt_timer(struct timer_list *t)
}
}
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
+ netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10568,7 +10577,7 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -10822,6 +10831,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
break;
@@ -11065,11 +11075,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
- keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
- keys1->ports.ports == keys2->ports.ports &&
- keys1->basic.ip_proto == keys2->basic.ip_proto &&
- keys1->basic.n_proto == keys2->basic.n_proto &&
+ if (keys1->basic.n_proto != keys2->basic.n_proto ||
+ keys1->basic.ip_proto != keys2->basic.ip_proto)
+ return false;
+
+ if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ return false;
+ } else {
+ if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src)) ||
+ memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst)))
+ return false;
+ }
+
+ if (keys1->ports.ports == keys2->ports.ports &&
keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
@@ -11087,6 +11109,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
+ u32 flags;
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -11126,8 +11149,9 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
- bp->hwrm_spec_code < 0x10601) {
+ flags = fkeys->control.flags;
+ if (((flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
@@ -11361,11 +11385,11 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return -EOPNOTSUPP;
/* The PF and its VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
return -EOPNOTSUPP;
- ppid->id_len = sizeof(bp->switch_id);
- memcpy(ppid->id, bp->switch_id, ppid->id_len);
+ ppid->id_len = sizeof(bp->dsn);
+ memcpy(ppid->id, bp->dsn, ppid->id_len);
return 0;
}
@@ -11421,9 +11445,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true);
- bnxt_dl_unregister(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
@@ -11457,6 +11481,9 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
rc);
return rc;
}
+ if (!fw_dflt)
+ return 0;
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -11470,9 +11497,6 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
- if (!fw_dflt)
- return 0;
-
bnxt_init_ethtool_link_settings(bp);
return 0;
}
@@ -11734,6 +11758,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
put_unaligned_le32(dw, &dsn[0]);
pci_read_config_dword(pdev, pos + 4, &dw);
put_unaligned_le32(dw, &dsn[4]);
+ bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
@@ -11845,9 +11870,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp)) {
/* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto init_err_pci_clean;
+ rc = bnxt_pcie_dsn_get(bp, bp->dsn);
}
/* MTU range: 60 - FW defined max */
@@ -11894,11 +11917,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_init_tc(bp);
}
+ bnxt_dl_register(bp);
+
rc = register_netdev(dev);
if (rc)
- goto init_err_cleanup_tc;
+ goto init_err_cleanup;
- bnxt_dl_register(bp);
+ if (BNXT_PF(bp))
+ devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
@@ -11908,7 +11934,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-init_err_cleanup_tc:
+init_err_cleanup:
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
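
The reordering above makes devlink registration precede register_netdev(), so devlink callbacks cannot run against a netdev that is already visible to userspace but only half torn down; bnxt_remove_one() is changed to unwind in the mirror order. Illustrative probe-order sketch (comments added, not from the patch):

bnxt_dl_register(bp);			/* 1. devlink instance + port */
rc = register_netdev(dev);		/* 2. netdev becomes visible  */
if (rc)
	goto init_err_cleanup;		/*    unwinds devlink as well */
if (BNXT_PF(bp))
	devlink_port_type_eth_set(&bp->dl_port, bp->dev); /* 3. bind port */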
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 505af5cfb1bd..cabef0b4f5fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,8 @@ struct bnxt {
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
+ u8 chip_rev;
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1532,6 +1534,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
@@ -1845,7 +1848,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
- u8 switch_id[8];
+ u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct notifier_block tc_netdev_nb;
@@ -1936,9 +1939,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type)
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_ALLOC:
- case HWRM_CFA_NTUPLE_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_CFG:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3eedd4477218..eec0168330b7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,6 +21,7 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
const char *region, struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
if (region)
return -EOPNOTSUPP;
@@ -31,7 +32,18 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
return -EPERM;
}
- return bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
+ 0);
+ rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ if (!rc)
+ devlink_flash_update_status_notify(dl, "Flashing done", region,
+ 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flashing failed",
+ region, 0, 0);
+ devlink_flash_update_end_notify(dl);
+ return rc;
}
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -89,7 +101,7 @@ static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
return -EOPNOTSUPP;
bnxt_fw_reset(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -116,7 +128,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
bnxt_fw_exception(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -262,11 +274,25 @@ void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
health->fatal = false;
}
+void bnxt_dl_health_recovery_done(struct bnxt *bp)
+{
+ struct bnxt_fw_health *hlth = bp->fw_health;
+
+ if (hlth->fatal)
+ devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
+ else
+ devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+}
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
+ .info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
};
@@ -333,6 +359,136 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
+ union devlink_param_value *nvm_cfg_ver)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ union bnxt_nvm_data *data;
+ dma_addr_t data_dma_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+ &data_dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ req.dest_data_addr = cpu_to_le64(data_dma_addr);
+ req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+ req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
+ BNXT_NVM_CFG_VER_BITS,
+ BNXT_NVM_CFG_VER_BYTES);
+
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ return rc;
+}
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ union devlink_param_value nvm_cfg_ver;
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char fw_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
+ if (rc)
+ return rc;
+
+ sprintf(buf, "%X", bp->chip_num);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc)
+ return rc;
+
+ ver_resp = &bp->ver_resp;
+ sprintf(buf, "%X", ver_resp->chip_rev);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc)
+ return rc;
+
+ if (BNXT_PF(bp)) {
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
+ bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (strlen(ver_resp->active_pkg_name)) {
+ rc =
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ ver_resp->active_pkg_name);
+ if (rc)
+ return rc;
+ }
+
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ u32 ver = nvm_cfg_ver.vu32;
+
+ sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
+ ver & 0xF);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ if (rc)
+ return rc;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
@@ -475,15 +631,48 @@ static const struct devlink_param bnxt_dl_params[] = {
static const struct devlink_param bnxt_dl_port_params[] = {
};
-int bnxt_dl_register(struct bnxt *bp)
+static int bnxt_dl_params_register(struct bnxt *bp)
{
- struct devlink *dl;
int rc;
- if (bp->hwrm_spec_code < 0x10600) {
- netdev_warn(bp->dev, "Firmware does not support NVM params");
- return -ENOTSUPP;
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ return rc;
}
+ rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+ if (rc) {
+ netdev_err(bp->dev, "devlink_port_params_register failed");
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ return rc;
+ }
+ devlink_params_publish(bp->dl);
+
+ return 0;
+}
+
+static void bnxt_dl_params_unregister(struct bnxt *bp)
+{
+ if (bp->hwrm_spec_code < 0x10600)
+ return;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+}
+
+int bnxt_dl_register(struct bnxt *bp)
+{
+ struct devlink *dl;
+ int rc;
if (BNXT_PF(bp))
dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
@@ -510,40 +699,23 @@ int bnxt_dl_register(struct bnxt *bp)
if (!BNXT_PF(bp))
return 0;
- rc = devlink_params_register(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
- netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
- rc);
- goto err_dl_unreg;
- }
-
devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- bp->pf.port_id, false, 0,
- bp->switch_id, sizeof(bp->switch_id));
+ bp->pf.port_id, false, 0, bp->dsn,
+ sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
- goto err_dl_param_unreg;
+ goto err_dl_unreg;
}
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed");
+ rc = bnxt_dl_params_register(bp);
+ if (rc)
goto err_dl_port_unreg;
- }
-
- devlink_params_publish(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_param_unreg:
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
err_dl_unreg:
devlink_unregister(dl);
err_dl_free:
@@ -560,12 +732,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
return;
if (BNXT_PF(bp)) {
- devlink_port_params_unregister(&bp->dl_port,
- bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
+ bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
}
devlink_unregister(dl);
devlink_free(dl);
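
bnxt_dl_info_get() above is a devlink .info_get implementation. Stripped to its skeleton, such a callback looks like this (minimal sketch; the driver name, serial number, and version strings are placeholders):

static int my_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			  struct netlink_ext_ack *extack)
{
	int rc;

	rc = devlink_info_driver_name_put(req, "my_driver");
	if (rc)
		return rc;

	rc = devlink_info_serial_number_put(req, "00-11-22-33-44-55-66-77");
	if (rc)
		return rc;

	/* "fw" is the generic running-firmware version name */
	return devlink_info_version_running_put(req,
			DEVLINK_INFO_VERSION_GENERIC_FW, "1.2.3");
}

Userspace then reads it with `devlink dev info pci/<bdf>`.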
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 6db6c3dac472..95f893f2a74d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -38,6 +38,10 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_IGNORE_ARI 164
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_NVM_CFG_VER 602
+
+#define BNXT_NVM_CFG_VER_BITS 24
+#define BNXT_NVM_CFG_VER_BYTES 4
#define BNXT_MSIX_VEC_MAX 1280
#define BNXT_MSIX_VEC_MIN_MAX 128
@@ -58,6 +62,7 @@ struct bnxt_dl_nvm_param {
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 08d56ec7b68a..6171fa8b3677 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1462,15 +1462,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
ethtool_link_ksettings_add_link_mode(lk_ksettings,
advertising, Autoneg);
base->autoneg = AUTONEG_ENABLE;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ base->duplex = DUPLEX_UNKNOWN;
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ else
+ base->duplex = DUPLEX_HALF;
+ }
ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- if (!netif_carrier_ok(dev))
- base->duplex = DUPLEX_UNKNOWN;
- else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
- else
- base->duplex = DUPLEX_HALF;
} else {
base->autoneg = AUTONEG_DISABLE;
ethtool_speed =
@@ -2707,7 +2707,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (netif_carrier_ok(bp->dev))
+ if (bp->link_info.link_up)
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index f9bf7d7250ab..b010b34cdaf8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
struct net_device *dev;
int rc, i;
+ if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+ return -ENODEV;
+
bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
if (!bp->vf_reps)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 120fa05a39ff..e50a15397e11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2019 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -508,8 +508,8 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
-static int bcmgenet_set_rx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 rbuf_chk_ctrl;
@@ -521,7 +521,7 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
/* enable rx checksumming */
if (rx_csum_en)
- rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
else
rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
priv->desc_rxchk_en = rx_csum_en;
@@ -535,12 +535,10 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-
- return 0;
}
-static int bcmgenet_set_tx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
bool desc_64b_en;
@@ -549,7 +547,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
- desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
/* enable 64-byte descriptors in both directions (RBUF and TBUF) */
if (desc_64b_en) {
@@ -563,21 +561,27 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-
- return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ dev->features;
- netdev_features_t wanted = dev->wanted_features;
- int ret = 0;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
- if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
- ret = bcmgenet_set_tx_csum(dev, wanted);
- if (changed & (NETIF_F_RXCSUM))
- ret = bcmgenet_set_rx_csum(dev, wanted);
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ bcmgenet_set_tx_csum(dev, features);
+ bcmgenet_set_rx_csum(dev, features);
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -857,6 +861,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
+ mib.tx_realloc_tsb_failed),
/* Per TX queues */
STAT_GENET_Q(0),
STAT_GENET_Q(1),
@@ -1218,18 +1225,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
}
-/* ioctl handle special commands that are not present in ethtool. */
-static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *ring)
{
@@ -1483,6 +1478,7 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
struct sk_buff *skb)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
u16 offset;
@@ -1495,12 +1491,15 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
* enough headroom for us to insert 64B status block.
*/
new_skb = skb_realloc_headroom(skb, sizeof(*status));
- dev_kfree_skb(skb);
if (!new_skb) {
+ dev_kfree_skb_any(skb);
+ priv->mib.tx_realloc_tsb_failed++;
dev->stats.tx_dropped++;
return NULL;
}
+ dev_consume_skb_any(skb);
skb = new_skb;
+ priv->mib.tx_realloc_tsb++;
}
skb_push(skb, sizeof(*status));
@@ -1516,24 +1515,19 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
ip_proto = ipv6_hdr(skb)->nexthdr;
break;
default:
- return skb;
+ /* don't use UDP flag */
+ ip_proto = 0;
+ break;
}
offset = skb_checksum_start_offset(skb) - sizeof(*status);
tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
- (offset + skb->csum_offset);
+ (offset + skb->csum_offset) |
+ STATUS_TX_CSUM_LV;
- /* Set the length valid bit for TCP and UDP and just set
- * the special UDP flag for IPv4, else just set to 0.
- */
- if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
- tx_csum_info |= STATUS_TX_CSUM_LV;
- if (ip_proto == IPPROTO_UDP &&
- ip_ver == htons(ETH_P_IP))
- tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
- } else {
- tx_csum_info = 0;
- }
+ /* Set the special UDP flag for UDP */
+ if (ip_proto == IPPROTO_UDP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
status->tx_csum_info = tx_csum_info;
}
@@ -1744,7 +1738,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int bytes_processed = 0;
unsigned int p_index, mask;
unsigned int discards;
- unsigned int chksum_ok = 0;
/* Clear status before servicing to reduce spurious interrupts */
if (ring->index == DESC_INDEX) {
@@ -1795,9 +1788,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dmadesc_get_length_status(priv, cb->bd_addr);
} else {
struct status_64 *status;
+ __be16 rx_csum;
status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
+ rx_csum = (__force __be16)(status->rx_csum & 0xffff);
+ if (priv->desc_rxchk_en) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -1840,18 +1839,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
goto next;
} /* error packet */
- chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
- priv->desc_rxchk_en;
-
skb_put(skb, len);
if (priv->desc_64b_en) {
skb_pull(skb, 64);
len -= 64;
}
- if (likely(chksum_ok))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
/* remove the 2 bytes the hardware added for IP alignment */
skb_pull(skb, 2);
len -= 2;
@@ -2164,8 +2157,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+ NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
@@ -2886,9 +2879,10 @@ static int bcmgenet_open(struct net_device *dev)
init_umac(priv);
- /* Make sure we reflect the value of CRC_CMD_FWD */
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ /* Apply features again in case we changed them while interface was
+ * down
+ */
+ bcmgenet_set_features(dev, dev->features);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
@@ -3055,7 +3049,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
ring->cb_ptr, ring->end_ptr);
}
-static void bcmgenet_timeout(struct net_device *dev)
+static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 int0_enable = 0;
@@ -3216,7 +3210,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_tx_timeout = bcmgenet_timeout,
.ndo_set_rx_mode = bcmgenet_set_rx_mode,
.ndo_set_mac_address = bcmgenet_set_mac_addr,
- .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
@@ -3327,19 +3321,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
}
/* enum genet_version starts at 1 */
@@ -3535,9 +3525,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
- /* Set hardware features */
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ /* Set default features */
+ dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
@@ -3574,6 +3566,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
+ err = -EIO;
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err;
+
/* Mii wait queue */
init_waitqueue_head(&priv->wq);
/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
@@ -3689,6 +3689,9 @@ static int bcmgenet_resume(struct device *d)
genphy_config_aneg(dev->phydev);
bcmgenet_mii_config(priv->dev, false);
+ /* Restore enabled features */
+ bcmgenet_set_features(dev, dev->features);
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
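
The bcmgenet rework above switches TX offload from NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM to the protocol-agnostic NETIF_F_HW_CSUM, and RX from CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE. With NETIF_F_HW_CSUM the stack describes a checksum request as a generic (start, insert) offset pair, which is exactly what bcmgenet_put_tx_csum() encodes into the transmit status block. A sketch of the two offsets involved:

/* where the ones'-complement sum begins ... */
unsigned int start = skb_checksum_start_offset(skb);
/* ... and where the 16-bit result must be written back */
unsigned int insert = start + skb->csum_offset;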
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index a5659197598f..61a6fe9f4cec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -144,6 +144,8 @@ struct bcmgenet_mib_counters {
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
+ u32 tx_realloc_tsb;
+ u32 tx_realloc_tsb_failed;
};
#define UMAC_HD_BKP_CTRL 0x004
@@ -251,6 +253,7 @@ struct bcmgenet_mib_counters {
#define RBUF_CHK_CTRL 0x14
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
+#define RBUF_L3_PARSE_DIS (1 << 5)
#define RBUF_ENERGY_CTRL 0x9c
#define RBUF_EEE_EN (1 << 0)
@@ -663,7 +666,6 @@ struct bcmgenet_priv {
bool desc_rxchk_en;
bool crc_fwd_en;
- unsigned int dma_rx_chk_bit;
u32 dma_max_burst_length;
u32 msg_enable;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 1604ad32e920..5b4568c2ad1c 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -294,7 +294,7 @@ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
enum sbmac_fc fc);
static int sbmac_open(struct net_device *dev);
-static void sbmac_tx_timeout (struct net_device *dev);
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
@@ -2419,7 +2419,7 @@ static void sbmac_mii_poll(struct net_device *dev)
}
-static void sbmac_tx_timeout (struct net_device *dev)
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
@@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev)
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
BUG_ON(!res);
- sbm_base = ioremap_nocache(res->start, resource_size(res));
+ sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
dev_name(&pldev->dev));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ca3aa1250dd1..88466255bf66 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7645,7 +7645,7 @@ static void tg3_poll_controller(struct net_device *dev)
}
#endif
-static void tg3_tx_timeout(struct net_device *dev)
+static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
struct netdev_queue *txq, struct sk_buff *skb)
{
- struct sk_buff *segs, *nskb;
u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+ struct sk_buff *segs, *seg, *next;
/* Estimate the number of fragments in the worst case */
if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
@@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
if (IS_ERR(segs) || !segs)
goto tg3_tso_bug_end;
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- tg3_start_xmit(nskb, tp->dev);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ tg3_start_xmit(seg, tp->dev);
+ }
tg3_tso_bug_end:
dev_consume_skb_any(skb);
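
The tg3 hunk above converts an open-coded GSO segment walk to skb_list_walk_safe(), which tolerates the loop body unlinking or freeing the current skb. The helper is roughly this macro (paraphrase of include/linux/skbuff.h):

#define skb_list_walk_safe(first, skb, next_skb)			\
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL;	\
	     (skb);							\
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)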
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 4042c2185e98..e17bfc87da90 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1124,11 +1124,10 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
- u32 pgnum, pgoff, loff = 0;
+ u32 pgnum, loff = 0;
int i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index e338272931d1..01a50a4b2113 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad,
bnad->pcidev = pdev;
bnad->mmio_start = pci_resource_start(pdev, 0);
bnad->mmio_len = pci_resource_len(pdev, 0);
- bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 19fe4f4867c7..dbf7070fcdba 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -630,10 +630,17 @@
#define GEM_CLK_DIV96 5
/* Constants for MAN register */
-#define MACB_MAN_SOF 1
-#define MACB_MAN_WRITE 1
-#define MACB_MAN_READ 2
-#define MACB_MAN_CODE 2
+#define MACB_MAN_C22_SOF 1
+#define MACB_MAN_C22_WRITE 1
+#define MACB_MAN_C22_READ 2
+#define MACB_MAN_C22_CODE 2
+
+#define MACB_MAN_C45_SOF 0
+#define MACB_MAN_C45_ADDR 0
+#define MACB_MAN_C45_WRITE 1
+#define MACB_MAN_C45_POST_READ_INCR 2
+#define MACB_MAN_C45_READ 3
+#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a0503b99dc79..7a2fe63d1136 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -337,11 +337,30 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -370,12 +389,32 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)
- | MACB_BF(DATA, value)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -611,21 +650,24 @@ static const struct phylink_mac_ops macb_phylink_ops = {
.mac_link_up = macb_mac_link_up,
};
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
static int macb_phylink_connect(struct macb *bp)
{
+ struct device_node *dn = bp->pdev->dev.of_node;
struct net_device *dev = bp->dev;
struct phy_device *phydev;
int ret;
- if (bp->pdev->dev.of_node &&
- of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
- ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
- 0);
- if (ret) {
- netdev_err(dev, "Could not attach PHY (%d)\n", ret);
- return ret;
- }
- } else {
+ if (dn)
+ ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+ if (!dn || (ret && !macb_phy_handle_exists(dn))) {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
@@ -634,10 +676,11 @@ static int macb_phylink_connect(struct macb *bp)
/* attach the mac to the phy */
ret = phylink_connect_phy(bp->phylink, phydev);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
- return ret;
- }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
}
phylink_start(bp->phylink);
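
The macb MDIO changes above add clause-45 support: a C45 access takes two MAN cycles, an address frame carrying the 16-bit register number, then the actual read or write addressed by device-in-package. Callers mark C45 accesses by encoding the MMD into regnum; a hypothetical caller reading PMA/PMD status would do:

/* MII_ADDR_C45 flags the access; bits 20:16 carry the MMD number */
int val = mdiobus_read(bus, phy_addr,
		       MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_STAT1);

which is why macb_mdio_read()/macb_mdio_write() extract (regnum >> 16) & 0x1F as the REGA field and regnum & 0xFFFF as the address-frame data.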
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index af04a2c81adb..05a3d067c3fc 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1251,7 +1251,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void xgmac_tx_timeout(struct net_device *dev)
+static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct xgmac_priv *priv = netdev_priv(dev);
schedule_work(&priv->tx_timeout_work);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7f3b2e3b0868..eab05b5534ea 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2562,7 +2562,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 370d76822ee0..7a77544a54f5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1628,7 +1628,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index f3f2e71431ac..600de587d7a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -31,7 +31,7 @@ static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
struct net_device *ndev);
-static void lio_vf_rep_tx_timeout(struct net_device *netdev);
+static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
@@ -172,7 +172,7 @@ lio_vf_rep_stop(struct net_device *ndev)
}
static void
-lio_vf_rep_tx_timeout(struct net_device *ndev)
+lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netif_trans_update(ndev);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index cdd7e5da4a74..e9575887a4f8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -790,9 +790,7 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (netdev->phydev)
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
- return -EINVAL;
+ return phy_do_ioctl(netdev, rq, cmd);
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f28409279ea4..016957285f99 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1741,7 +1741,7 @@ static void nicvf_get_stats64(struct net_device *netdev,
}
-static void nicvf_tx_timeout(struct net_device *dev)
+static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct nicvf *nic = netdev_priv(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 58f89f6a040f..883cfa9c4b6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (!is_offload(adapter))
return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
@@ -3265,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
- adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+ adapter->regs = ioremap(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index becee29f5df7..8b7d156f79d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -56,6 +56,7 @@
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
+#include "t4fw_api.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
@@ -68,6 +69,16 @@ extern struct mutex uld_mutex;
#define ETHTXQ_STOP_THRES \
(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
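
Moving FW_PARAM_DEV()/FW_PARAM_PFVF() into cxgb4.h lets every cxgb4 source file build firmware parameter mnemonics instead of keeping private copies. Illustrative use (t4_query_params() is the existing query path; the call site itself is hypothetical):

u32 param = FW_PARAM_DEV(CCLK);	/* core-clock device parameter */
u32 val;
int ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);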
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index aca9f7a20a2a..9d1f2f88b945 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
@@ -3175,14 +3174,12 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
- unsigned int tid_start = 0;
struct adapter *adap = seq->private;
- const struct tid_info *t = &adap->tids;
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
-
- if (chip > CHELSIO_T5)
- tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+ const struct tid_info *t;
+ enum chip_type chip;
+ t = &adap->tids;
+ chip = CHELSIO_CHIP_VERSION(adap->params.chip);
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -3194,9 +3191,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", t->tid_base,
sb - 1, adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
@@ -3205,14 +3202,14 @@ static int tid_info_show(struct seq_file *seq, void *v)
t->aftid_base,
t->aftid_end,
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
} else {
seq_printf(seq, "TID range: %u..%u",
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->hash_tids_in_use));
}
@@ -3220,8 +3217,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: %u..%u", tid_start,
- tid_start + t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", t->tid_base,
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -3244,6 +3241,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
t->sftid_base, t->sftid_base + t->nsftids - 2,
t->sftids_in_use);
+ if (t->nhpftids)
+ seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
+ t->hpftid_base + t->nhpftids - 1);
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 1d39fca11810..2a2938bbb93a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -361,20 +361,22 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx,
tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
if (is_hashfilter(adapter) && hash) {
- if (fidx < adapter->tids.ntids) {
- f = adapter->tids.tid_tab[fidx];
- if (!f)
- return -EINVAL;
- } else {
+ if (tid_out_of_range(&adapter->tids, fidx))
return -E2BIG;
- }
+ f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
+ if (!f)
+ return -EINVAL;
} else {
- if ((fidx != (adapter->tids.nftids +
- adapter->tids.nsftids - 1)) &&
- fidx >= adapter->tids.nftids)
+ if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids - 1)) &&
+ fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
return -E2BIG;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx -
+ adapter->tids.nhpftids];
if (!f->valid)
return -EINVAL;
}
@@ -480,6 +482,7 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
ftid -= n;
}
spin_unlock_bh(&t->ftid_lock);
+ ftid += t->nhpftids;
return found ? ftid : -ENOMEM;
}
@@ -507,6 +510,24 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
return 0;
}
+static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->hpftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == PF_INET)
+ __set_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_allocate_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
unsigned int chip_ver)
{
@@ -522,33 +543,58 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (family == PF_INET)
+ __clear_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_release_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+}
+
bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
{
+ struct filter_entry *prev_fe, *next_fe, *tab;
struct adapter *adap = netdev2adap(dev);
- struct filter_entry *prev_fe, *next_fe;
+ u32 prev_ftid, next_ftid, max_tid;
struct tid_info *t = &adap->tids;
- u32 prev_ftid, next_ftid;
+ unsigned long *bmap;
bool valid = true;
+ if (idx < t->nhpftids) {
+ bmap = t->hpftid_bmap;
+ tab = t->hpftid_tab;
+ max_tid = t->nhpftids;
+ } else {
+ idx -= t->nhpftids;
+ bmap = t->ftid_bmap;
+ tab = t->ftid_tab;
+ max_tid = t->nftids;
+ }
+
/* Only insert the rule if both of the following conditions
* are met:
* 1. The immediate previous rule has priority <= @prio.
* 2. The immediate next rule has priority >= @prio.
*/
spin_lock_bh(&t->ftid_lock);
+
/* Don't insert if there's a rule already present at @idx. */
- if (test_bit(idx, t->ftid_bmap)) {
+ if (test_bit(idx, bmap)) {
valid = false;
goto out_unlock;
}
- next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
- if (next_ftid >= t->nftids)
+ next_ftid = find_next_bit(bmap, max_tid, idx);
+ if (next_ftid >= max_tid)
next_ftid = idx;
- next_fe = &adap->tids.ftid_tab[next_ftid];
+ next_fe = &tab[next_ftid];
- prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ prev_ftid = find_last_bit(bmap, idx);
if (prev_ftid >= idx)
prev_ftid = idx;
@@ -558,13 +604,13 @@ bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
* accordingly.
*/
if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ prev_fe = &tab[prev_ftid & ~0x3];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
} else {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ prev_fe = &tab[prev_ftid & ~0x1];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
}
if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
@@ -579,11 +625,16 @@ out_unlock:
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
unsigned int len;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
len = sizeof(*fwr);
skb = alloc_skb(len, GFP_KERNEL);
@@ -609,10 +660,15 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
*/
int set_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter2_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -762,10 +818,14 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
struct filter_entry *f;
int ret;
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids)
return -EINVAL;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
ret = writable_filter(f);
if (ret)
return ret;
@@ -811,12 +871,22 @@ void clear_all_filters(struct adapter *adapter)
struct net_device *dev = adapter->port[0];
unsigned int i;
+ if (adapter->tids.hpftid_tab) {
+ struct filter_entry *f = &adapter->tids.hpftid_tab[0];
+
+ for (i = 0; i < adapter->tids.nhpftids; i++, f++)
+ if (f->valid || f->pending)
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
- adapter->tids.nsftids;
+ adapter->tids.nsftids +
+ adapter->tids.nhpftids;
+
/* Clear all TCAM filters */
- for (i = 0; i < max_ftid; i++, f++)
+ for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
if (f->valid || f->pending)
cxgb4_del_filter(dev, i, &f->fs);
}
@@ -1319,17 +1389,17 @@ out_err:
* filter specification in order to facilitate signaling completion of the
* operation.
*/
-int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+int __cxgb4_set_filter(struct net_device *dev, int ftid,
struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
- unsigned int max_fidx, fidx;
- struct filter_entry *f;
+ unsigned int max_fidx, fidx, chip_ver;
+ int iq, ret, filter_id = ftid;
+ struct filter_entry *f, *tab;
u32 iconf;
- int iq, ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_set_hash_filter(dev, fs, ctx);
@@ -1338,7 +1408,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
@@ -1353,6 +1423,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (iq < 0)
return iq;
+ if (fs->prio) {
+ tab = &adapter->tids.hpftid_tab[0];
+ } else {
+ tab = &adapter->tids.ftid_tab[0];
+ filter_id = ftid - adapter->tids.nhpftids;
+ }
+
/* IPv6 filters occupy four slots and must be aligned on
* four-slot boundaries. IPv4 filters only occupy a single
* slot and have no alignment requirements but writing a new
@@ -1373,9 +1450,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
else
fidx = filter_id & ~0x1;
- if (fidx != filter_id &&
- adapter->tids.ftid_tab[fidx].fs.type) {
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx != filter_id && tab[fidx].fs.type) {
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
@@ -1399,7 +1475,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
for (fidx = filter_id + 1; fidx < filter_id + 4;
fidx++) {
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
@@ -1415,7 +1491,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
/* Check overlapping IPv4 filter slot */
fidx = filter_id + 1;
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
__func__, fidx);
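The slot-alignment masks used in the two hunks above are easy to sanity-check standalone: pre-T6 chips round an IPv6 filter down to a 4-slot boundary with ~0x3, and T6 rounds down to a 2-slot boundary with ~0x1. For example:

#include <stdio.h>

int main(void)
{
	unsigned int filter_id = 6;

	printf("pre-T6 IPv6 base slot: %u\n", filter_id & ~0x3u); /* 4 */
	printf("T6 IPv6 base slot:     %u\n", filter_id & ~0x1u); /* 6 */
	return 0;
}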
@@ -1427,36 +1503,35 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
/* Check to make sure that the provided filter index is not
* already in use by someone else
*/
- f = &adapter->tids.ftid_tab[filter_id];
+ f = &tab[filter_id];
if (f->valid)
return -EBUSY;
- fidx = filter_id + adapter->tids.ftid_base;
- ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (fs->prio) {
+ fidx = filter_id + adapter->tids.hpftid_base;
+ ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ } else {
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
+ }
+
if (ret)
return ret;
/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
- if (ret) {
- /* Clear the bits we have set above */
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
if (is_t6(adapter->params.chip) && fs->type &&
ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
IPV6_ADDR_ANY) {
ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
- if (ret) {
- cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
}
/* Convert the filter specification into our internal format.
@@ -1487,7 +1562,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
f->fs.mask.vni,
0, 1, 1);
if (ret < 0)
- goto free_clip;
+ goto free_tid;
f->fs.val.ovlan = ret;
f->fs.mask.ovlan = 0x1ff;
@@ -1501,21 +1576,22 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
f->ctx = ctx;
f->tid = fidx; /* Save the actual tid */
- ret = set_filter_wr(adapter, filter_id);
- if (ret) {
+ ret = set_filter_wr(adapter, ftid);
+ if (ret)
+ goto free_tid;
+
+ return ret;
+
+free_tid:
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ else
cxgb4_clear_ftid(&adapter->tids, filter_id,
fs->type ? PF_INET6 : PF_INET,
chip_ver);
- clear_filter(adapter, f);
- }
-
- return ret;
-free_clip:
- if (is_t6(adapter->params.chip) && f->fs.type)
- cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET, chip_ver);
+ clear_filter(adapter, f);
return ret;
}
@@ -1537,7 +1613,7 @@ static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
__func__, filter_id, adapter->tids.nftids);
- if (filter_id > adapter->tids.ntids)
+ if (tid_out_of_range(t, filter_id))
return -E2BIG;
f = lookup_tid(t, filter_id);
@@ -1590,11 +1666,11 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int max_fidx, chip_ver;
struct filter_entry *f;
- unsigned int max_fidx;
int ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs && fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_del_hash_filter(dev, filter_id, ctx);
@@ -1603,21 +1679,31 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
- f = &adapter->tids.ftid_tab[filter_id];
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid) {
f->ctx = ctx;
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids,
+ f->tid - adapter->tids.hpftid_base,
+ f->fs.type ? PF_INET6 : PF_INET);
+ else
+ cxgb4_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
@@ -1842,11 +1928,18 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
max_fidx = adap->tids.nftids + adap->tids.nsftids;
/* Get the corresponding filter entry for this tid */
if (adap->tids.ftid_tab) {
- /* Check this in normal filter region */
- idx = tid - adap->tids.ftid_base;
- if (idx >= max_fidx)
- return;
- f = &adap->tids.ftid_tab[idx];
+ idx = tid - adap->tids.hpftid_base;
+ if (idx < adap->tids.nhpftids) {
+ f = &adap->tids.hpftid_tab[idx];
+ } else {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ idx += adap->tids.nhpftids;
+ }
+
if (f->tid != tid)
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 12ff69b3ba91..649842a8aa28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -804,6 +804,26 @@ static int setup_ppod_edram(struct adapter *adap)
return 0;
}
+static void adap_config_hpfilter(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int ret;
+
+ /* Enable the HP filter region. Older firmware will fail this
+ * request, and that is fine.
+ */
+ param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &val);
+
+ /* An error just means the FW doesn't know about HP filter support;
+ * that's not a problem, so log it but don't return an error.
+ */
+ if (ret < 0)
+ dev_err(adapter->pdev_dev,
+ "HP filter region isn't supported by FW\n");
+}
+
/**
* cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
@@ -1427,8 +1447,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
unsigned int tid)
{
- void **p = &t->tid_tab[tid];
struct adapter *adap = container_of(t, struct adapter, tids);
+ void **p = &t->tid_tab[tid - t->tid_base];
spin_lock_bh(&adap->tid_release_lock);
*p = adap->tid_release_head;
@@ -1480,13 +1500,13 @@ static void process_tid_release_list(struct work_struct *work)
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
unsigned short family)
{
- struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
+ struct sk_buff *skb;
- WARN_ON(tid >= t->ntids);
+ WARN_ON(tid_out_of_range(&adap->tids, tid));
- if (t->tid_tab[tid]) {
- t->tid_tab[tid] = NULL;
+ if (t->tid_tab[tid - adap->tids.tid_base]) {
+ t->tid_tab[tid - adap->tids.tid_base] = NULL;
atomic_dec(&t->conns_in_use);
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
@@ -1518,6 +1538,7 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int hpftid_bmap_size;
unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
@@ -1525,12 +1546,15 @@ static int tid_init(struct tid_info *t)
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
+ t->nhpftids * sizeof(*t->hpftid_tab) +
+ hpftid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long) +
t->neotids * sizeof(*t->eotid_tab) +
@@ -1543,7 +1567,9 @@ static int tid_init(struct tid_info *t)
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
- t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
+ t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
@@ -1578,6 +1604,8 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->eotid_bmap, t->neotids);
}
+ if (t->nhpftids)
+ bitmap_zero(t->hpftid_bmap, t->nhpftids);
bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
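tid_init() above extends a single-allocation carve-out: one zeroed buffer is sliced into consecutive tables and bitmaps, so adding the hpftid table and bitmap only means inserting two more slices between the stid bitmap and the ftid table. A simplified userspace model of the pattern, with arbitrary sizes and hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Illustration of the carve-out pattern only; the real code sizes the
 * buffer from the tid_info counts and uses kvzalloc().
 */
struct filter { int valid; };

int main(void)
{
	size_t nstids = 16, nhpftids = 8, nftids = 64;
	size_t size = nstids * sizeof(long) +
		      nhpftids * sizeof(struct filter) + sizeof(long) +
		      nftids * sizeof(struct filter);
	char *base = calloc(1, size);
	long *stid_bmap, *hpftid_bmap;
	struct filter *hpftid_tab, *ftid_tab;

	if (!base)
		return 1;

	/* Slice the one buffer into back-to-back regions. */
	stid_bmap = (long *)base;
	hpftid_tab = (struct filter *)&stid_bmap[nstids];
	hpftid_bmap = (long *)&hpftid_tab[nhpftids];
	ftid_tab = (struct filter *)&hpftid_bmap[1];

	printf("hpftid_tab offset: %td\n", (char *)hpftid_tab - base);
	printf("ftid_tab offset:   %td\n", (char *)ftid_tab - base);
	free(base);
	return 0;
}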
@@ -3135,9 +3163,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
+ struct ch_sched_queue qe = { 0 };
+ struct ch_sched_params p = { 0 };
struct sched_class *e;
- struct ch_sched_params p;
- struct ch_sched_queue qe;
u32 req_rate;
int err = 0;
@@ -3154,6 +3182,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return -EINVAL;
}
+ qe.queue = index;
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+ dev_err(adap->pdev_dev,
+ "Queue %u already bound to class %u of type: %u\n",
+ index, e->idx, e->info.u.params.level);
+ return -EBUSY;
+ }
+
/* Convert from Mbps to Kbps */
req_rate = rate * 1000;
@@ -3183,7 +3220,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return 0;
/* Fetch any available unused or matching scheduling class */
- memset(&p, 0, sizeof(p));
p.type = SCHED_CLASS_TYPE_PACKET;
p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
p.u.params.mode = SCHED_CLASS_MODE_CLASS;
@@ -4351,6 +4387,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"HMA configuration failed with error %d\n", ret);
if (is_t6(adapter->params.chip)) {
+ adap_config_hpfilter(adapter);
ret = setup_ppod_edram(adapter);
if (!ret)
dev_info(adapter->pdev_dev, "Successfully enabled "
@@ -4660,16 +4697,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/*
* Grab some of our fundamental operating parameters.
*/
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
-
-#define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y_V(0) | \
- FW_PARAMS_PARAM_Z_V(0)
-
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
@@ -4687,6 +4714,16 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->sge.ingr_start = val[5];
if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ params[0] = FW_PARAM_PFVF(HPFILTER_START);
+ params[1] = FW_PARAM_PFVF(HPFILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret < 0)
+ goto bye;
+
+ adap->tids.hpftid_base = val[0];
+ adap->tids.nhpftids = val[1] - val[0] + 1;
+
/* Read the raw mps entries. In T6, the last 2 tcam entries
* are reserved for raw mac addresses (rawf = 2, one per port).
*/
@@ -4698,6 +4735,9 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->rawf_start = val[0];
adap->rawf_cnt = val[1] - val[0] + 1;
}
+
+ adap->tids.tid_base =
+ t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
}
/* qids (ingress/egress) returned from firmware can be anywhere
@@ -5050,8 +5090,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
}
adap->params.crypto = ntohs(caps_cmd.cryptocaps);
}
-#undef FW_PARAM_PFVF
-#undef FW_PARAM_DEV
/* The MTU/MSS Table is initialized by now, so load their values. If
* we're initializing the adapter, then we'll make any modifications
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 0fa80bef575d..bb5513bdd293 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -672,10 +672,14 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
* 0 to driver. However, the hardware TCAM index
* starts from 0. Hence, the -1 here.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids +
+ adap->tids.nhpftids)) {
fidx = cls->common.prio - 1;
- else
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
+ } else {
fidx = cxgb4_get_free_ftid(dev, inet_family);
+ }
/* Only insert FLOWER rule if its priority doesn't
* conflict with existing rules in the LETCAM.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 102b370fbd3e..1b7681a4eb32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct flow_action *actions = &cls->rule->action;
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
+ struct ch_sched_queue qe;
+ struct sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
}
}
+ for (i = 0; i < pi->nqsets; i++) {
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = i;
+
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Some queues are already bound to different class");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = tc;
+ ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (i--) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+
+ return ret;
+}
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+}
+
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
struct sched_class *e;
+ int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
return -ENOMEM;
}
+ ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not bind queues to traffic class");
+ goto out_free;
+ }
+
tc_port_matchall->egress.hwtc = e->idx;
tc_port_matchall->egress.cookie = cls->cookie;
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
+
+out_free:
+ cxgb4_sched_class_free(dev, e->idx);
+ return ret;
}
static void cxgb4_matchall_free_tc(struct net_device *dev)
@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_matchall_tc_unbind_queues(dev);
cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
@@ -137,7 +204,7 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
* -1 here. 1 slot is enough to create a wildcard matchall
* VIID rule.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
fidx = cls->common.prio - 1;
else
fidx = cxgb4_get_free_ftid(dev, PF_INET);
@@ -156,6 +223,8 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
fs = &tc_port_matchall->ingress.fs;
memset(fs, 0, sizeof(*fs));
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
fs->hitcnts = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 8971dddcdb7a..ec3eb45ee3b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
u32 speed, qcount = 0, qoffset = 0;
+ u32 start_a, start_b, end_a, end_b;
int ret;
- u8 i;
+ u8 i, j;
if (!mqprio->qopt.num_tc)
return 0;
@@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
qcount += mqprio->qopt.count[i];
+ start_a = mqprio->qopt.offset[i];
+ end_a = start_a + mqprio->qopt.count[i] - 1;
+ for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+ start_b = mqprio->qopt.offset[j];
+ end_b = start_b + mqprio->qopt.count[j] - 1;
+
+ /* If queue count is 0, then the traffic
+ * belonging to this class will not use
+ * ETHOFLD queues. So, no need to validate
+ * further.
+ */
+ if (!mqprio->qopt.count[i])
+ break;
+
+ if (!mqprio->qopt.count[j])
+ continue;
+
+ if (max_t(u32, start_a, start_b) <=
+ min_t(u32, end_a, end_b)) {
+ netdev_err(dev,
+ "Queues can't overlap across tc\n");
+ return -EINVAL;
+ }
+ }
+
/* Convert byte per second to bits per second */
min_rate += (mqprio->min_rate[i] * 8);
max_rate += (mqprio->max_rate[i] * 8);
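The added check above is the standard closed-interval overlap test: ranges [start_a, end_a] and [start_b, end_b] intersect exactly when max(start_a, start_b) <= min(end_a, end_b). Worked standalone:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* tc0 queues 0-3, tc1 queues 2-5: max(0,2)=2 <= min(3,5)=3 -> overlap */
	printf("%d\n", max_u(0, 2) <= min_u(3, 5));
	/* tc0 queues 0-3, tc1 queues 4-7: max(0,4)=4 >  min(3,7)=3 -> disjoint */
	printf("%d\n", max_u(0, 4) <= min_u(3, 7));
	return 0;
}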
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 133f8623ba86..269b8d9e25e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -176,7 +176,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Only insert U32 rule if its priority doesn't conflict with
* existing rules in the LETCAM.
*/
- if (filter_id >= adapter->tids.nftids ||
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
!cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
@@ -199,6 +199,8 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ if (filter_id < adapter->tids.nhpftids)
+ fs.prio = 1;
fs.tc_prio = cls->common.prio;
fs.tc_cookie = cls->knode.handle;
@@ -355,6 +357,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
unsigned int filter_id, max_tids, i, j;
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
+ struct filter_entry *f;
u32 handle, uhtid;
int ret;
@@ -363,8 +366,15 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Fetch the location to delete the filter. */
filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id >= adapter->tids.nftids ||
- cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
+ return -ERANGE;
+
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
+ if (cls->knode.handle != f->fs.tc_cookie)
return -ERANGE;
t = adapter->tc_u32;
@@ -445,7 +455,7 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
- unsigned int max_tids = adap->tids.nftids;
+ unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
struct cxgb4_tc_u32_table *t;
unsigned int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 861b25d28ed6..d9d27bc1ae67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -99,6 +99,7 @@ struct eotid_entry {
*/
struct tid_info {
void **tid_tab;
+ unsigned int tid_base;
unsigned int ntids;
struct serv_entry *stid_tab;
@@ -111,6 +112,11 @@ struct tid_info {
unsigned int natids;
unsigned int atid_base;
+ struct filter_entry *hpftid_tab;
+ unsigned long *hpftid_bmap;
+ unsigned int nhpftids;
+ unsigned int hpftid_base;
+
struct filter_entry *ftid_tab;
unsigned long *ftid_bmap;
unsigned int nftids;
@@ -147,9 +153,15 @@ struct tid_info {
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
+ tid -= t->tid_base;
return tid < t->ntids ? t->tid_tab[tid] : NULL;
}
+static inline bool tid_out_of_range(const struct tid_info *t, unsigned int tid)
+{
+ return ((tid - t->tid_base) >= t->ntids);
+}
+
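tid_out_of_range() above relies on unsigned wraparound: when tid is below tid_base, the subtraction wraps to a huge value, so a single comparison rejects both out-of-range directions. A standalone model of the same check:

#include <stdio.h>

static int tid_out_of_range(unsigned int tid, unsigned int tid_base,
			    unsigned int ntids)
{
	/* Wraps for tid < tid_base, catching underflow and overflow at once. */
	return (tid - tid_base) >= ntids;
}

int main(void)
{
	printf("%d\n", tid_out_of_range(5, 16, 100));   /* 1: below base */
	printf("%d\n", tid_out_of_range(50, 16, 100));  /* 0: in range  */
	printf("%d\n", tid_out_of_range(200, 16, 100)); /* 1: past end  */
	return 0;
}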
static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
{
return atid < t->natids ? t->atid_tab[atid].data : NULL;
@@ -171,7 +183,7 @@ static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid, unsigned short family)
{
- t->tid_tab[tid] = data;
+ t->tid_tab[tid - t->tid_base] = data;
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
atomic_add(2, &t->hash_tids_in_use);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index e9e45006632d..1a16449e9deb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 3e61bd5d0c29..cebe1412d960 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return NULL;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct sched_queue_entry *qe = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index e92ff68bdd0a..5cc74a5a1774 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ac4fb43bdec6..accad1101ad1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1321,6 +1321,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index c9aebcde403a..33ace3307059 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1128,7 +1128,7 @@ net_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void net_timeout(struct net_device *dev)
+static void net_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index acb2856936d2..bbd7b3175f09 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1095,7 +1095,7 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
/* netif_tx_lock held, BHs disabled */
-static void enic_tx_timeout(struct net_device *netdev)
+static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct enic *enic = netdev_priv(netdev);
schedule_work(&enic->tx_hang_reset);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 2814b96751b4..f30fa8e6ef80 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1298,7 +1298,7 @@ out_drop:
return NETDEV_TX_OK;
}
-static void gmac_tx_timeout(struct net_device *netdev)
+static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
netdev_err(netdev, "Tx timeout\n");
gmac_dump_dma_state(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cce90b5925d9..1ea3372775e6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -964,7 +964,7 @@ dm9000_init_dm9000(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void dm9000_timeout(struct net_device *dev)
+static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
{
struct board_info *db = netdev_priv(dev);
u8 reg_save;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index f1a2da15dd0a..d305d1b24b0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1436,7 +1436,7 @@ static int de_close (struct net_device *dev)
return 0;
}
-static void de_tx_timeout (struct net_device *dev)
+static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct de_private *de = netdev_priv(dev);
const int irq = de->pdev->irq;
@@ -2039,7 +2039,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* remap CSR registers */
- regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+ regs = ioremap(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 0efdbd1a4a6f..32d470d4122a 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2214,15 +2214,16 @@ static int __init dmfe_init_module(void)
if (cr6set)
dmfe_cr6_user_set = cr6set;
- switch(mode) {
- case DMFE_10MHF:
+ switch (mode) {
+ case DMFE_10MHF:
case DMFE_100MHF:
case DMFE_10MFD:
case DMFE_100MFD:
case DMFE_1M_HPNA:
dmfe_media_mode = mode;
break;
- default:dmfe_media_mode = DMFE_AUTO;
+ default:
+ dmfe_media_mode = DMFE_AUTO;
break;
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3e3e08698876..9e9d9eee29d9 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
-static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
@@ -534,7 +534,7 @@ free_ring:
}
-static void tulip_tx_timeout(struct net_device *dev)
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index b1f30b194300..117ffe08800d 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1809,8 +1809,8 @@ static int __init uli526x_init_module(void)
if (cr6set)
uli526x_cr6_user_set = cr6set;
- switch (mode) {
- case ULI526X_10MHF:
+ switch (mode) {
+ case ULI526X_10MHF:
case ULI526X_100MHF:
case ULI526X_10MFD:
case ULI526X_100MFD:
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 70cb2d689c2c..7f136488e67c 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -331,7 +331,7 @@ static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
@@ -921,7 +921,7 @@ static void init_registers(struct net_device *dev)
iowrite32(0, ioaddr + RxStartDemand);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 55e720d2ea0c..26c5da032b1e 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -66,7 +66,7 @@ static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (struct timer_list *t);
-static void rio_tx_timeout (struct net_device *dev);
+static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
@@ -696,7 +696,7 @@ rio_timer (struct timer_list *t)
}
static void
-rio_tx_timeout (struct net_device *dev)
+rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 4a37a69764ce..b91387c456ba 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -432,7 +432,7 @@ static int mdio_wait_link(struct net_device *dev, int wait);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
@@ -969,7 +969,7 @@ static void netdev_timer(struct timer_list *t)
add_timer(&np->timer);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index e24979010969..5f8fa1145db6 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,19 +725,6 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -759,7 +746,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_stop = dnet_close,
.ndo_get_stats = dnet_get_stats,
.ndo_start_xmit = dnet_start_xmit,
- .ndo_do_ioctl = dnet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 39eb7d525043..56f59db6ebf2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1417,7 +1417,7 @@ drop:
return NETDEV_TX_OK;
}
-static void be_tx_timeout(struct net_device *netdev)
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ea4f17f5cce7..a817ca661c1f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -869,7 +869,7 @@ static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
return -ENOSYS;
}
-static void ethoc_tx_timeout(struct net_device *dev)
+static void ethoc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ethoc *priv = netdev_priv(dev);
u32 pending = ethoc_read(priv, INT_SOURCE);
@@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
- priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+ priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
@@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
if (netdev->mem_end) {
- priv->membase = devm_ioremap_nocache(&pdev->dev,
+ priv->membase = devm_ioremap(&pdev->dev,
netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 8ed85037f021..4572797f00d7 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1536,16 +1536,7 @@ static int ftgmac100_stop(struct net_device *netdev)
return 0;
}
-/* optional */
-static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netdev->phydev)
- return -ENXIO;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
-static void ftgmac100_tx_timeout(struct net_device *netdev)
+static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1597,7 +1588,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ftgmac100_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ftgmac100_tx_timeout,
.ndo_set_rx_mode = ftgmac100_set_rx_mode,
.ndo_set_features = ftgmac100_set_features,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c24fd56a2c71..84f10970299a 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -428,7 +428,7 @@ static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
-static void fealnx_tx_timeout(struct net_device *dev);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1191,7 +1191,7 @@ static void reset_timer(struct timer_list *t)
}
-static void fealnx_tx_timeout(struct net_device *dev)
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->mem;
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 6a93293d31e0..67c436400352 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index a301f0095223..09dbcd819d84 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -288,7 +288,7 @@ static int dpaa_stop(struct net_device *net_dev)
return err;
}
-static void dpaa_tx_timeout(struct net_device *net_dev)
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct dpaa_percpu_priv *percpu_priv;
const struct dpaa_priv *priv;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 6437fe6b9abf..cc1b7f85e433 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -27,6 +27,20 @@ static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
mc_dev = to_fsl_mc_device(dev);
switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = DPRTC_EVENT_ETS1;
+ break;
+ case 1:
+ bit = DPRTC_EVENT_ETS2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+ break;
case PTP_CLK_REQ_PPS:
bit = DPRTC_EVENT_PPS;
break;
@@ -96,6 +110,12 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
ptp_clock_event(ptp_qoriq->clock, &event);
}
+ if (status & DPRTC_EVENT_ETS1)
+ extts_clean_up(ptp_qoriq, 0, true);
+
+ if (status & DPRTC_EVENT_ETS2)
+ extts_clean_up(ptp_qoriq, 1, true);
+
err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
DPRTC_IRQ_INDEX, status);
if (unlikely(err)) {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 4ac05bfef338..96ffeb948f08 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -9,9 +9,11 @@
/* Command versioning */
#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_VERSION_2 2
#define DPRTC_CMD_ID_OFFSET 4
#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+#define DPRTC_CMD_V2(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_VERSION_2)
/* Command IDs */
#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
@@ -19,7 +21,7 @@
#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
-#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD_V2(0x014)
#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
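The versioning scheme above encodes the command ID above bit 4 and the version in the low nibble, so DPRTC_CMD_V2() only changes the low bits of an existing command word. Evaluated standalone:

#include <stdio.h>

#define CMD_ID_OFFSET 4
#define CMD(id, ver)  (((id) << CMD_ID_OFFSET) | (ver))

int main(void)
{
	printf("SET_IRQ_MASK v1: %#x\n", CMD(0x014, 1)); /* 0x141 */
	printf("SET_IRQ_MASK v2: %#x\n", CMD(0x014, 2)); /* 0x142 */
	return 0;
}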
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index 311c184e1aef..05c413719e55 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -20,6 +20,8 @@ struct fsl_mc_io;
#define DPRTC_IRQ_INDEX 0
#define DPRTC_EVENT_PPS 0x08000000
+#define DPRTC_EVENT_ETS1 0x00800000
+#define DPRTC_EVENT_ETS2 0x00400000
int dprtc_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index edad4ca46327..fe942de19597 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select FSL_ENETC_MDIO
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d0db33e5b6b7..74f7ac253b8b 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,7 +3,7 @@
common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
+fsl-enetc-y := enetc_pf.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 17739906c966..1f79e36116a3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -149,11 +149,21 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (enetc_tx_csum(skb, &temp_bd))
flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+ else if (tx_ring->tsd_enable)
+ flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
+ if (flags & ENETC_TXBD_FLAGS_TSE) {
+ u32 temp;
+
+ temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
+ | (flags << ENETC_TXBD_FLAGS_OFFSET);
+ temp_bd.txstart = cpu_to_le32(temp);
+ }
+
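The txstart word built above packs a coarsened departure time together with the BD flags: the timestamp is shifted right by 5 (32 ns granularity), masked to the low 25 bits, and the flags occupy the top byte. A standalone sketch of the packing; the mask and offset mirror the defines added in enetc_hw.h, and the values are examples only:

#include <stdio.h>
#include <stdint.h>

#define TXSTART_MASK 0x01ffffffu  /* GENMASK(24, 0) */
#define FLAGS_OFFSET 24

int main(void)
{
	uint64_t skb_mstamp_ns = 123456789ULL;  /* example departure time */
	uint32_t flags = 0x12;                  /* example BD flags */
	uint32_t txstart = ((uint32_t)(skb_mstamp_ns >> 5) & TXSTART_MASK)
			 | (flags << FLAGS_OFFSET);

	printf("txstart word: 0x%08x\n", txstart);
	return 0;
}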
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
*txbd = temp_bd;
@@ -227,6 +237,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
enetc_bdr_idx_inc(tx_ring, &i);
tx_ring->next_to_use = i;
+ skb_tx_timestamp(skb);
+
/* let H/W know BD ring has been updated */
enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
@@ -1503,6 +1515,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_taprio(ndev, type_data);
case TC_SETUP_QDISC_CBS:
return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 7ee0da6d0015..dd4a227ffc7a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -72,6 +72,7 @@ struct enetc_bdr {
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
+ u8 tsd_enable; /* Time specific departure */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -256,8 +257,10 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 880a8ed8bb47..301ee0dde02d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -579,6 +579,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_ALL);
#else
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
#endif
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 51f543ef37a8..62554f28ce07 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -200,6 +200,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
@@ -358,6 +359,7 @@ union enetc_tx_bd {
u8 l4_csoff;
u8 flags;
}; /* default layout */
+ __le32 txstart;
__le32 lstatus;
};
};
@@ -378,11 +380,14 @@ union enetc_tx_bd {
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
+#define ENETC_TXBD_FLAGS_TSE BIT(1)
#define ENETC_TXBD_FLAGS_W BIT(2)
#define ENETC_TXBD_FLAGS_CSUM BIT(3)
+#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
#define ENETC_TXBD_FLAGS_EX BIT(6)
#define ENETC_TXBD_FLAGS_F BIT(7)
-
+#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
+#define ENETC_TXBD_FLAGS_OFFSET 24
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
@@ -615,3 +620,7 @@ struct enetc_cbd {
/* Port time gating capability register */
#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+
+/* Port time specific departure */
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
+#define ENETC_TSDE BIT(31)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 149883c8f0b8..48c32a171afa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -1,41 +1,56 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/mdio.h>
#include <linux/of_mdio.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
#define ENETC_MDIO_CTL 0x4 /* MDIO control */
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define enetc_mdio_rd(hw, off) \
- enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
-#define enetc_mdio_wr(hw, off, val) \
- enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
+static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
-#define ENETC_MDC_DIV 258
+#define enetc_mdio_rd(mdio_priv, off) \
+ _enetc_mdio_rd(mdio_priv, ENETC_##off)
+#define enetc_mdio_wr(mdio_priv, off, val) \
+ _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
+#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2))
#define MDIO_CFG_ENC45 BIT(6)
/* external MDIO only - driven on neg MDC edge */
#define MDIO_CFG_NEG BIT(23)
+#define ENETC_EMDIO_CFG \
+ (MDIO_CFG_HOLD(2) | \
+ MDIO_CFG_CLKDIV(258) | \
+ MDIO_CFG_NEG)
+
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_hw *hw)
+static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
u32 val;
@@ -46,12 +61,11 @@ static int enetc_mdio_wait_complete(struct enetc_hw *hw)
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -61,44 +75,44 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* write the value */
- enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_write);
int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -107,86 +121,56 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* initiate the read */
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
return value;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_read);
-int enetc_mdio_probe(struct enetc_pf *pf)
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct device_node *np;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- np = of_get_child_by_name(dev->of_node, "mdio");
- if (!np) {
- dev_err(dev, "MDIO node missing\n");
- return -EINVAL;
- }
-
- err = of_mdiobus_register(bus, np);
- if (err) {
- of_node_put(np);
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ struct enetc_hw *hw;
- of_node_put(np);
- pf->mdio = bus;
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
- return 0;
-}
+ hw->port = port_regs;
-void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
+ return hw;
}
+EXPORT_SYMBOL_GPL(enetc_hw_alloc);
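
With the read/write ops and enetc_hw_alloc() now exported, another module can bring up an MDIO bus on its own register window without going through the PF driver. A minimal sketch, assuming the ENETC headers are in scope; demo_mdio_setup() and its naming are illustrative, not part of this patch:

	#include <linux/fsl/enetc_mdio.h>
	#include <linux/phy.h>

	/* Hypothetical consumer of the exported ENETC MDIO helpers. */
	static int demo_mdio_setup(struct device *dev, void __iomem *port_regs)
	{
		struct enetc_mdio_priv *mdio_priv;
		struct enetc_hw *hw;
		struct mii_bus *bus;

		hw = enetc_hw_alloc(dev, port_regs);
		if (IS_ERR(hw))
			return PTR_ERR(hw);

		bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
		if (!bus)
			return -ENOMEM;

		bus->name = "demo ENETC MDIO";
		bus->read = enetc_mdio_read;
		bus->write = enetc_mdio_write;
		bus->parent = dev;
		mdio_priv = bus->priv;
		mdio_priv->hw = hw;
		mdio_priv->mdio_base = ENETC_EMDIO_BASE; /* assumed visible via driver headers */
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

		return mdiobus_register(bus);
	}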
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
deleted file mode 100644
index 60c9a3889824..000000000000
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
-
-#include <linux/phy.h>
-#include "enetc_pf.h"
-
-struct enetc_mdio_priv {
- struct enetc_hw *hw;
-};
-
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index fbd41ce01f06..ebc635f8a4cc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
@@ -13,17 +14,29 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
{
struct enetc_mdio_priv *mdio_priv;
struct device *dev = &pdev->dev;
+ void __iomem *port_regs;
struct enetc_hw *hw;
struct mii_bus *bus;
int err;
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return -ENOMEM;
+ port_regs = pci_iomap(pdev, 0, 0);
+ if (!port_regs) {
+ dev_err(dev, "iomap failed\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ hw = enetc_hw_alloc(dev, port_regs);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ goto err_hw_alloc;
+ }
bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
bus->name = ENETC_MDIO_BUS_NAME;
bus->read = enetc_mdio_read;
@@ -31,13 +44,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(dev, "device enable failed\n");
- return err;
+ goto err_pci_enable;
}
err = pci_request_region(pdev, 0, KBUILD_MODNAME);
@@ -46,13 +60,6 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
- hw->port = pci_iomap(pdev, 0, 0);
- if (!hw->port) {
- err = -ENXIO;
- dev_err(dev, "iomap failed\n");
- goto err_ioremap;
- }
-
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -62,12 +69,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- iounmap(mdio_priv->hw->port);
-err_ioremap:
pci_release_mem_regions(pdev);
err_pci_mem_reg:
pci_disable_device(pdev);
-
+err_pci_enable:
+err_mdiobus_alloc:
+ iounmap(port_regs);
+err_hw_alloc:
+err_ioremap:
return err;
}
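
Note how the reordered labels keep teardown a mirror of setup: each label undoes only the steps that had already succeeded, and pci_iomap() is now unwound last because it happens first. The shape of the pattern, as a generic sketch with placeholder steps:

	/* Placeholder steps standing in for iomap/alloc/register. */
	static int step_a(void) { return 0; }
	static int step_b(void) { return 0; }
	static int step_c(void) { return 0; }
	static void undo_a(void) { }
	static void undo_b(void) { }

	static int demo_probe(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;

		err = step_b();
		if (err)
			goto err_b;

		err = step_c();
		if (err)
			goto err_c;

		return 0;

	err_c:
		undo_b();
	err_b:
		undo_a();
		return err;
	}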
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e7482d483b28..fc0d7d99e9a1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -2,6 +2,7 @@
/* Copyright 2017-2019 NXP */
#include <linux/module.h>
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "enetc_pf.h"
@@ -749,6 +750,52 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
+static int enetc_mdio_probe(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ np = of_get_child_by_name(dev->of_node, "mdio");
+ if (!np) {
+ dev_err(dev, "MDIO node missing\n");
+ return -EINVAL;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ of_node_put(np);
+ dev_err(dev, "cannot register MDIO bus\n");
+ return err;
+ }
+
+ of_node_put(np);
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 10dd1b53bb08..59e65a6f6c3e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -49,7 +49,3 @@ struct enetc_pf {
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
-
-/* MDIO */
-int enetc_mdio_probe(struct enetc_pf *pf);
-void enetc_mdio_remove(struct enetc_pf *pf);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 2e99438cb1bf..0c6bf3a55a9a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev)
case SPEED_10:
default:
pspeed = ENETC_PMR_PSPEED_10M;
- netdev_err(ndev, "Qbv PSPEED set speed link down.\n");
}
priv->speed = speed;
@@ -156,6 +155,11 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
int err;
int i;
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
+
for (i = 0; i < priv->num_tx_rings; i++)
enetc_set_bdr_prio(&priv->si->hw,
priv->tx_ring[i]->index,
@@ -192,7 +196,6 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
- u32 tc_max_sized_frame;
u8 tc = cbs->queue;
u8 prio_top, prio_next;
int bw_sum = 0;
@@ -250,7 +253,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interference_size is maxSizedFrame.
*
@@ -298,3 +301,33 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_etf_qopt_offload *qopt = type_data;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ int tc;
+
+ if (!tc_nums)
+ return -EOPNOTSUPP;
+
+ tc = qopt->queue;
+
+ if (tc < 0 || tc >= priv->num_tx_rings)
+ return -EINVAL;
+
+	/* Do not support TXSTART and TX CSUM offload simultaneously */
+ if (ndev->features & NETIF_F_CSUM_MASK)
+ return -EBUSY;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ return -EBUSY;
+
+ priv->tx_ring[tc]->tsd_enable = qopt->enable;
+ enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
+ qopt->enable ? ENETC_TSDE : 0);
+
+ return 0;
+}
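
enetc_setup_tc_txtime() is reached from the driver's ndo_setup_tc handler when a tc-etf qdisc is offloaded. A hedged sketch of the dispatch side (only the ETF arm is shown; demo_setup_tc is an illustrative name):

	static int demo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
				 void *type_data)
	{
		switch (type) {
		case TC_SETUP_QDISC_ETF:
			/* per-queue time-specific departure (TSD) offload */
			return enetc_setup_tc_txtime(ndev, type_data);
		default:
			return -EOPNOTSUPP;
		}
	}

From userspace this would be triggered by something like "tc qdisc add dev eno0 parent 100:1 etf clockid CLOCK_TAI delta 300000 offload" (command shown for illustration).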
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9294027e9d90..4432a59904c7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1141,7 +1141,7 @@ fec_stop(struct net_device *ndev)
static void
-fec_timeout(struct net_device *ndev)
+fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
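
This and the identical hunks below are mechanical fallout of a tree-wide netdev API change: .ndo_tx_timeout now receives the index of the stalled queue, so drivers can report or reset just that queue. A minimal sketch of the new prototype (demo_* names are hypothetical):

	#include <linux/netdevice.h>

	struct demo_priv {
		struct work_struct reset_work;
	};

	static void demo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
	{
		struct demo_priv *priv = netdev_priv(ndev);

		netdev_err(ndev, "TX timeout on queue %u\n", txqueue);
		schedule_work(&priv->reset_work);
	}

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_tx_timeout	= demo_tx_timeout,
	};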
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 30cdb246d020..7a3f066e611d 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -84,7 +84,7 @@ static int debug = -1; /* the above default */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
-static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -785,16 +785,6 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
};
-static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_open = mpc52xx_fec_open,
.ndo_stop = mpc52xx_fec_close,
@@ -802,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mpc52xx_fec_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
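
phy_do_ioctl() and phy_do_ioctl_running() are phylib helpers that absorb the ioctl boilerplate each driver used to carry; the _running variant additionally rejects calls while the interface is down. Their behavior is roughly the following (a sketch, not the actual drivers/net/phy implementation):

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	/* Approximate behavior of the phylib helpers. */
	static int sketch_phy_do_ioctl(struct net_device *dev, struct ifreq *ifr,
				       int cmd)
	{
		if (!dev->phydev)
			return -ENODEV;

		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	}

	static int sketch_phy_do_ioctl_running(struct net_device *dev,
					       struct ifreq *ifr, int cmd)
	{
		if (!netif_running(dev))
			return -ENODEV;

		return sketch_phy_do_ioctl(dev, ifr, cmd);
	}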
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 41c6fa200e74..e1901874c19f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -110,7 +110,7 @@ do { \
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_XGMII;
+ tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index f0806ace1ae2..55f2122c3217 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -692,7 +692,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->res = __devm_request_region(dev,
fman_get_mem_region(priv->fman),
- res.start, res.end + 1 - res.start,
+ res.start, resource_size(&res),
"mac");
if (!mac_dev->res) {
dev_err(dev, "__devm_request_mem_region(mac) failed\n");
@@ -701,7 +701,7 @@ static int mac_probe(struct platform_device *_of_dev)
}
priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- mac_dev->res->end + 1 - mac_dev->res->start);
+ resource_size(mac_dev->res));
if (!priv->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
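
resource_size() from <linux/ioport.h> is the canonical way to get the length of a struct resource; open-coding end + 1 - start invites off-by-one mistakes because resource ranges are inclusive at both ends. The helper itself is just:

	/* From <linux/ioport.h>: resource ranges are [start, end] inclusive. */
	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}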
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 3981c06f082f..add61fed33ee 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -641,7 +641,7 @@ static void fs_timeout_work(struct work_struct *work)
netif_wake_queue(dev);
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -882,14 +882,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
@@ -907,7 +899,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_do_ioctl = fs_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 72868a28b621..f7e5cafe89a9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2093,7 +2093,7 @@ static void gfar_reset_task(struct work_struct *work)
reset_gfar(priv->ndev);
}
-static void gfar_timeout(struct net_device *dev)
+static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2205,13 +2205,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ bool do_tstamp;
+
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
frags = skb_shinfo(skb)->nr_frags;
/* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if (unlikely(do_tstamp))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2225,7 +2229,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = be16_to_cpu(next->length) +
GMAC_FCB_LEN + GMAC_TXPAL_LEN;
@@ -2235,7 +2239,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
~0x7UL);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f839fa94ebdd..0d101c00286f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3545,7 +3545,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* ucc_geth_timeout gets called when a packet has not been
* transmitted after a set amount of time.
*/
-static void ucc_geth_timeout(struct net_device *dev)
+static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index e03b30c60dcf..c82c85ef5fb3 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
/* Return all Fs if nothing was there */
- if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
+ priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+ "fsl,erratum-a011043");
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
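
The quirk is opted into from the device tree, read once at probe time, and consulted in the MDIO read path. A generic sketch of the probe-time half (demo_* names are hypothetical; the property string is the one this patch introduces):

	#include <linux/of.h>

	struct demo_quirks {
		bool has_a011043;	/* RD_ER flag unreliable on affected silicon */
	};

	static void demo_read_quirks(struct device_node *np, struct demo_quirks *q)
	{
		/* of_property_read_bool() is true iff the property is present */
		q->has_a011043 = of_property_read_bool(np, "fsl,erratum-a011043");
	}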
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 1eca0fdb9933..a7b7a4aace79 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -93,7 +93,7 @@ static irqreturn_t fjn_interrupt(int irq, void *dev_id);
static void fjn_rx(struct net_device *dev);
static void fjn_reset(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void fjn_tx_timeout(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue);
static const struct ethtool_ops netdev_ethtool_ops;
/*
@@ -774,7 +774,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
/*====================================================================*/
-static void fjn_tx_timeout(struct net_device *dev)
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 9b7a8db9860f..e032563ceefd 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -845,7 +845,7 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_napi_enabled(priv);
}
-static void gve_tx_timeout(struct net_device *dev)
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 150a8ccfb8b1..d9718b87279d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -779,7 +779,7 @@ static int hip04_mac_stop(struct net_device *ndev)
return 0;
}
-static void hip04_timeout(struct net_device *ndev)
+static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hip04_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 90ab7ade44c4..57c3bc4f7089 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -675,18 +675,6 @@ static void hisi_femac_net_set_rx_mode(struct net_device *dev)
}
}
-static int hisi_femac_net_ioctl(struct net_device *dev,
- struct ifreq *ifreq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifreq, cmd);
-}
-
static const struct ethtool_ops hisi_femac_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -697,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_open = hisi_femac_net_open,
.ndo_stop = hisi_femac_net_close,
.ndo_start_xmit = hisi_femac_net_xmit,
- .ndo_do_ioctl = hisi_femac_net_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 247de9105d10..4fb776920a93 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -893,7 +893,7 @@ static void hix5hd2_tx_timeout_task(struct work_struct *work)
hix5hd2_net_open(priv->netdev);
}
-static void hix5hd2_net_timeout(struct net_device *dev)
+static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 14ab20491fd0..c117074c16e3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
- netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
@@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
container_of(napi, struct hns_nic_ring_data, napi);
struct hnae_ring *ring = ring_data->ring;
-try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
@@ -1066,7 +1064,7 @@ try_again:
napi_complete(napi);
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
- goto try_again;
+ return budget;
}
}
@@ -1485,7 +1483,7 @@ static int hns_nic_net_stop(struct net_device *ndev)
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
-static void hns_nic_net_timeout(struct net_device *ndev)
+static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1499,20 +1497,6 @@ static void hns_nic_net_timeout(struct net_device *ndev)
}
}
-static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- struct phy_device *phy_dev = netdev->phydev;
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!phy_dev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phy_dev, ifr, cmd);
-}
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1960,7 +1944,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
- .ndo_do_ioctl = hns_nic_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
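
The poll fix above (returning budget instead of looping) follows the standard NAPI contract: when the budget is exhausted the handler must return budget with the IRQ still masked, and only when work_done < budget may it complete NAPI and re-enable the interrupt. A generic sketch, with demo_* placeholders:

	#include <linux/netdevice.h>

	/* Placeholders for ring cleanup and IRQ unmasking. */
	static int demo_clean_rings(struct napi_struct *napi, int budget) { return 0; }
	static void demo_unmask_irq(struct napi_struct *napi) { }

	static int demo_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = demo_clean_rings(napi, budget);

		if (work_done >= budget)
			return budget;	/* core reschedules us; IRQ stays masked */

		if (napi_complete_done(napi, work_done))
			demo_unmask_irq(napi);

		return work_done;
	}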
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index d01bf536eb86..7aa2fac76c5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,6 +3,8 @@
# Makefile for the HISILICON network device drivers.
#
+ccflags-y += -I$(srctree)/$(src)
+
obj-$(CONFIG_HNS3) += hns3pf/
obj-$(CONFIG_HNS3) += hns3vf/
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3b5e2d7251e7..a3e4081b84ba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -164,11 +164,7 @@ enum hnae3_reset_type {
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
-};
-
-enum hnae3_flr_state {
- HNAE3_FLR_DOWN,
- HNAE3_FLR_DONE,
+ HNAE3_MAX_RESET,
};
enum hnae3_port_base_vlan_state {
@@ -575,8 +571,7 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
+#define HNAE3_INT_NAME_LEN 32
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 6b328a259efc..1d4ffc5f408a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -176,7 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring = &priv->ring[q_num];
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -209,10 +209,10 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = &priv->ring[q_num + h->kinfo.num_tqps];
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
- rx_desc = &ring->desc[rx_index];
+ rx_desc = &ring->desc[rx_index];
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
@@ -297,8 +297,8 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOMEM;
- len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
uncopy_bytes = copy_to_user(buffer, buf, len);
kfree(buf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 69545dd6c938..acb796cc10d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -24,6 +24,12 @@
#include "hnae3.h"
#include "hns3_enet.h"
+/* All hns3 tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "hns3_trace.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
@@ -54,6 +60,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define HNS3_INNER_VLAN_TAG 1
#define HNS3_OUTER_VLAN_TAG 2
+#define HNS3_MIN_TX_LEN 33U
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -127,18 +135,21 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
continue;
if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "TxRx",
- txrx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "TxRx", txrx_int_idx++);
txrx_int_idx++;
} else if (tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Rx",
- rx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Rx", rx_int_idx++);
} else if (tqp_vectors->tx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Tx",
- tx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Tx", tx_int_idx++);
} else {
/* Skip this unused q_vector */
continue;
@@ -155,6 +166,8 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
+ disable_irq(tqp_vectors->vector_irq);
+
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -173,6 +186,7 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
/* enable vector */
hns3_mask_vector_irq(tqp_vector, 1);
@@ -372,18 +386,6 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
- /* the device can work without cpu rmap, only aRFS needs it */
- ret = hns3_set_rx_cpu_rmap(netdev);
- if (ret)
- netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
-
- /* get irq resource for all vectors */
- ret = hns3_nic_init_irq(priv);
- if (ret) {
- netdev_err(netdev, "init irq failed! ret=%d\n", ret);
- goto free_rmap;
- }
-
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */
@@ -396,22 +398,15 @@ static int hns3_nic_net_up(struct net_device *netdev)
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
- if (ret)
- goto out_start_err;
-
- return 0;
-
-out_start_err:
- set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ for (j = i - 1; j >= 0; j--)
+ hns3_vector_disable(&priv->tqp_vector[j]);
+ }
- hns3_nic_uninit_irq(priv);
-free_rmap:
- hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -508,11 +503,6 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
- hns3_free_rx_cpu_rmap(netdev);
-
- /* free irq resources */
- hns3_nic_uninit_irq(priv);
-
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
* to disable the ring through firmware when downing the netdev.
@@ -734,6 +724,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
+ trace_hns3_tso(skb);
+
return 0;
}
@@ -1138,6 +1130,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
ring_ptr_move_fw(ring, next_to_use);
return HNS3_LIKELY_BD_NUM;
}
@@ -1161,6 +1154,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1286,6 +1280,14 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
return false;
}
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++)
+ size[i] = skb_frag_size(&shinfo->frags[i]);
+}
+
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev,
struct sk_buff *skb)
@@ -1297,8 +1299,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_num(skb, bd_size);
if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
- !hns3_skb_need_linearized(skb, bd_size, bd_num))
+ !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
+ trace_hns3_over_8bd(skb);
goto out;
+ }
if (__skb_linearize(skb))
return -ENOMEM;
@@ -1306,8 +1310,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
- bd_num > HNS3_MAX_NON_TSO_BD_NUM))
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
+ trace_hns3_over_8bd(skb);
return -ENOMEM;
+ }
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
@@ -1405,6 +1411,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int bd_num = 0;
int ret;
+	/* Hardware cannot handle frames shorter than HNS3_MIN_TX_LEN; pad them */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1448,6 +1458,7 @@ out:
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
+ trace_hns3_tx_desc(ring, pre_ntu);
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
@@ -1556,6 +1567,37 @@ static int hns3_nic_set_features(struct net_device *netdev,
return 0;
}
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN 480U
+#define HNS3_MAX_L4_HDR_LEN 60U
+
+ size_t len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ if (skb->encapsulation)
+ len = skb_inner_transport_header(skb) - skb->data;
+ else
+ len = skb_transport_header(skb) - skb->data;
+
+	/* Assume the L4 header is 60 bytes, as TCP is the only protocol
+	 * with a flexible header length, and its maximum is 60 bytes.
+ */
+ len += HNS3_MAX_L4_HDR_LEN;
+
+	/* Hardware only supports checksum offload for skbs whose header
+	 * length is at most 480 bytes.
+ */
+ if (len > HNS3_MAX_HDR_LEN)
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -1869,7 +1911,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return true;
}
-static void hns3_nic_net_timeout(struct net_device *ndev)
+static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1970,6 +2012,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_do_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
+ .ndo_features_check = hns3_features_check,
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
@@ -2051,10 +2094,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
- if (!ae_dev) {
- ret = -ENOMEM;
- return ret;
- }
+ if (!ae_dev)
+ return -ENOMEM;
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
@@ -2497,8 +2538,8 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
rmb(); /* Make sure head is ready before touch any data */
if (unlikely(!is_valid_clean_head(ring, head))) {
- netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
+ hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
u64_stats_update_begin(&ring->syncp);
ring->stats.io_err_cnt++;
@@ -2584,6 +2625,12 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
+static bool hns3_page_is_reusable(struct page *page)
+{
+ return page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page);
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2598,7 +2645,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
- if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+ if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
@@ -2668,6 +2715,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+
+ trace_hns3_gro(skb);
+
return 0;
}
@@ -2788,7 +2838,6 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
-#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
@@ -2805,6 +2854,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
return -ENOMEM;
}
+ trace_hns3_rx_desc(ring);
prefetchw(skb->data);
ring->pending_buf = 1;
@@ -2814,7 +2864,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
+ if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
@@ -2832,33 +2882,19 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- return HNS3_NEED_ADD_FRAG;
+ return 0;
}
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- bool pending)
+static int hns3_add_frag(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
- struct hns3_desc *pre_desc;
+ struct hns3_desc *desc;
u32 bd_base_info;
- int pre_bd;
- /* if there is pending bd, the SW param next_to_clean has moved
- * to next and the next is NULL
- */
- if (pending) {
- pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
- pre_desc = &ring->desc[pre_bd];
- bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
- } else {
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- }
-
- while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
+ do {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2893,9 +2929,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ trace_hns3_rx_desc(ring);
ring_ptr_move_fw(ring, next_to_clean);
ring->pending_buf++;
- }
+ } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
return 0;
}
@@ -3063,28 +3100,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
if (ret < 0) /* alloc buffer fail */
return ret;
- if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, false);
+ if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
-
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
- memcpy(skb->data, ring->va,
- ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, true);
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
+ }
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
+ /* As the head data may be changed when GRO enable, copy
+ * the head data in after other data rx completed
+ */
+ if (skb->len > HNS3_RX_HEAD_SIZE)
memcpy(skb->data, ring->va,
ALIGN(ring->pull_len, sizeof(long)));
- }
ret = hns3_handle_bdinfo(ring, skb);
if (unlikely(ret)) {
@@ -3590,26 +3622,25 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
continue;
- hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
+		/* Since the mapping can be overwritten, if we fail to get the
+		 * chain between vector and ring, we should carry on and deal
+		 * with the remaining resources.
+ */
+ if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
- free_irq(tqp_vector->vector_irq, tqp_vector);
- tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
- }
-
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
}
}
-static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
@@ -3621,11 +3652,10 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[i];
ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
if (ret)
- return ret;
+ return;
}
devm_kfree(&pdev->dev, priv->tqp_vector);
- return 0;
}
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
@@ -4024,6 +4054,18 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto out_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4045,6 +4087,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_client_start:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+out_init_irq_fail:
unregister_netdev(netdev);
out_reg_netdev_fail:
hns3_uninit_phy(netdev);
@@ -4082,15 +4127,17 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
goto out_netdev_free;
}
+ hns3_free_rx_cpu_rmap(netdev);
+
+ hns3_nic_uninit_irq(priv);
+
hns3_del_all_fd_rules(netdev, true);
hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4417,17 +4464,32 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto err_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
- goto err_uninit_ring;
+ goto err_client_start_fail;
}
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
-err_uninit_ring:
+err_client_start_fail:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+err_init_irq_fail:
hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
@@ -4477,6 +4539,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
hns3_clear_all_ring(handle, true);
hns3_reset_tx_queue(priv->ae_handle);
@@ -4484,9 +4548,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4652,7 +4714,7 @@ static int __init hns3_init_module(void)
pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
- snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
+ snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
hns3_driver_name);
client.ops = &client_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 9d47abd5c37c..abefd7a179f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -673,4 +673,5 @@ void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6e0212b79438..c03856e63320 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -423,9 +423,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_",
- prefix, i);
- n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
/* now, concatenate the stats string to it */
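
The snprintf()-to-scnprintf() conversions matter whenever the return value feeds later arithmetic: snprintf() returns the length the output would have had, which can exceed the buffer, while scnprintf() returns the number of bytes actually stored. A small sketch of the difference:

	static void demo_scnprintf(void)
	{
		char buf[8];
		int n;

		n = snprintf(buf, sizeof(buf), "%s", "0123456789");
		/* n == 10: the would-be length; using it as a cursor or a
		 * copy length would run past the 8-byte buffer.
		 */

		n = scnprintf(buf, sizeof(buf), "%s", "0123456789");
		/* n == 7: bytes actually written, excluding the NUL. */
	}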
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
new file mode 100644
index 000000000000..7bddcca148a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HNS3_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32))
+
+DECLARE_EVENT_CLASS(hns3_skb_template,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, headlen)
+ __field(unsigned int, len)
+ __field(__u8, nr_frags)
+ __field(__u8, ip_summed)
+ __field(unsigned int, hdr_len)
+ __field(unsigned short, gso_size)
+ __field(unsigned short, gso_segs)
+ __field(unsigned int, gso_type)
+ __field(bool, fraglist)
+ __array(__u32, size, MAX_SKB_FRAGS)
+ ),
+
+ TP_fast_assign(
+ __entry->headlen = skb_headlen(skb);
+ __entry->len = skb->len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_segs = skb_shinfo(skb)->gso_segs;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->hdr_len = skb->encapsulation ?
+ skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
+ skb_transport_offset(skb) + tcp_hdrlen(skb);
+ __entry->ip_summed = skb->ip_summed;
+ __entry->fraglist = skb_has_frag_list(skb);
+ hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
+ ),
+
+ TP_printk(
+ "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s",
+ __entry->headlen, __entry->len, __entry->hdr_len,
+ __entry->ip_summed, __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->fraglist, __entry->nr_frags,
+ __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32))
+ )
+);
+
+DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_gro,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_tso,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+TRACE_EVENT(hns3_tx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
+ TP_ARGS(ring, cur_ntu),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+		__entry->desc_dma = ring->desc_dma_addr;
+ memcpy(__entry->desc, &ring->desc[cur_ntu],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hns3_rx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __field(dma_addr_t, buf_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
+ memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad) buf(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma, &__entry->buf_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+#endif /* _HNS3_TRACE_H_ */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns3_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 940ead3970d1..7f509eff562e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -479,19 +479,6 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
-static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
-{
- spin_lock(&ring->lock);
- hclge_free_cmd_desc(ring);
- spin_unlock(&ring->lock);
-}
-
-static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
-{
- hclge_destroy_queue(&hw->cmq.csq);
- hclge_destroy_queue(&hw->cmq.crq);
-}
-
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -501,5 +488,6 @@ void hclge_cmd_uninit(struct hclge_dev *hdev)
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
- hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d97da67f07a1..96498d9b4754 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
+#include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_DESC_DATA_LEN 6
@@ -63,6 +64,7 @@ enum hclge_cmd_status {
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclge_cmq {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 112df34b3869..67fad80035d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -73,8 +73,6 @@ static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
-#define HCLGE_GET_DFX_REG_TYPE_CNT 4
-
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
int index;
@@ -886,8 +884,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
- bool sel_x, u32 loc)
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
@@ -912,7 +910,7 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
- return;
+ return ret;
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
@@ -931,16 +929,76 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (cnt != hdev->hclge_fd_rule_num)
+ return -EINVAL;
+
+ return cnt;
}
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
- u32 i;
+ int i, ret, rule_cnt;
+ u16 *rule_locs;
+
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only FD-supported dev supports dump fd tcam\n");
+ return;
+ }
+
+ if (!hdev->hclge_fd_rule_num ||
+ !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return;
- for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
- hclge_dbg_fd_tcam_read(hdev, 0, true, i);
- hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ sizeof(u16), GFP_KERNEL);
+ if (!rule_locs)
+ return;
+
+ rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+ if (rule_cnt <= 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get rule number, ret = %d\n", rule_cnt);
+ kfree(rule_locs);
+ return;
}
+
+ for (i = 0; i < rule_cnt; i++) {
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key x, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key y, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+ }
+
+ kfree(rule_locs);
}
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
@@ -976,6 +1034,14 @@ void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
+static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
+ hdev->last_serv_processed);
+	dev_info(&hdev->pdev->dev, "serv_processed_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1227,6 +1293,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
+ hclge_dbg_dump_serv_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index dc66b4e13377..c85b72dc44d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -505,7 +505,7 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
@@ -599,7 +599,7 @@ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
@@ -1898,10 +1898,8 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!desc)
+ return -ENOMEM;
ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 13dbd249f35f..ec5f6eeb639b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
+static struct workqueue_struct *hclge_wq;
+
static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
@@ -416,7 +418,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
int i, k, n;
@@ -453,7 +455,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
u16 i, k, n;
@@ -802,7 +804,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -815,8 +817,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
hclge_update_stats(handle, NULL);
- mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -860,9 +862,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
usleep_range(1000, 2000);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
- ret = hclge_parse_func_status(hdev, req);
-
- return ret;
+ return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -880,12 +880,12 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = __le16_to_cpu(req->tqp_num);
- hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
hdev->tx_buf_size =
- __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
@@ -893,7 +893,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (req->dv_buf_size)
hdev->dv_buf_size =
- __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->dv_buf_size = HCLGE_DEFAULT_DV;
@@ -901,10 +901,10 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* the number of NIC MSI-X vectors always equals the RoCE's. */
@@ -917,7 +917,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
hdev->num_nic_msi = hdev->num_msi;
@@ -1331,11 +1331,7 @@ static int hclge_get_cap(struct hclge_dev *hdev)
}
/* get pf resource */
- ret = hclge_query_pf_resource(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
-
- return ret;
+ return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
@@ -2619,30 +2615,21 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac speed dup fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac autoneg fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Fec mode init fail, ret = %d\n", ret);
+ if (ret)
return ret;
- }
}
ret = hclge_set_mac_mtu(hdev, hdev->mps);
@@ -2665,31 +2652,27 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->mbx_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->rst_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- system_wq, &hdev->service_task,
+ hclge_wq, &hdev->service_task,
delay_time);
- }
}
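
These three hunks collapse the separate mbx/reset work items into the single service_task delayed work on the driver-private hclge_wq: which sub-task actually runs is recorded in state bits, and mod_delayed_work_on() with a zero delay pulls an already-queued work forward instead of queueing a duplicate. The pattern in isolation, with hypothetical demo_* names (a sketch, not the driver code):

#include <linux/workqueue.h>
#include <linux/bitops.h>

enum { DEMO_STATE_REMOVING, DEMO_STATE_MBX_SCHED };

struct demo_dev {
        unsigned long state;
        struct delayed_work service_task;
};

static struct workqueue_struct *demo_wq;        /* created at module init */

static void demo_task_schedule(struct demo_dev *dev, unsigned long delay)
{
        if (!test_bit(DEMO_STATE_REMOVING, &dev->state))
                /* delay 0 expedites a work that is already queued */
                mod_delayed_work(demo_wq, &dev->service_task, delay);
}

static void demo_mbx_task_schedule(struct demo_dev *dev)
{
        /* the bit tells the service task which sub-task to run */
        if (!test_and_set_bit(DEMO_STATE_MBX_SCHED, &dev->state))
                demo_task_schedule(dev, 0);
}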
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2748,6 +2731,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (!client)
return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
state = hclge_get_mac_phy_link(hdev);
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -2761,6 +2748,8 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
hdev->hw.mac.link = state;
}
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_update_port_capability(struct hclge_mac *mac)
@@ -2831,6 +2820,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return ret;
}
+ /* In some cases the MAC speed queried from the IMP may be 0; such a
+ * value must not be written to mac->speed.
+ */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, it means it's an old version
* firmware, do not update these params
@@ -2906,7 +2901,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
- if (pci_num_vf(hdev->pdev) == 0) {
+ if (!pci_num_vf(hdev->pdev)) {
dev_err(&hdev->pdev->dev,
"SRIOV is disabled, can not get vport(%d) info.\n", vf);
return NULL;
@@ -2940,6 +2935,9 @@ static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
ivf->trusted = vport->vf_info.trusted;
ivf->min_tx_rate = 0;
ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
ether_addr_copy(ivf->mac, vport->vf_info.mac);
return 0;
@@ -2998,8 +2996,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_info(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
*clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -3172,8 +3168,10 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
/* this would be explicitly freed in the end */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, "hclge_misc", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -3247,7 +3245,8 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 200
+#define HCLGE_RESET_WAIT_CNT 350
+
u32 val, reg, reg_bit;
u32 cnt = 0;
@@ -3264,8 +3263,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
- case HNAE3_FLR_RESET:
- break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -3273,20 +3270,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
- if (hdev->reset_type == HNAE3_FLR_RESET) {
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < HCLGE_RESET_WAIT_CNT)
- msleep(HCLGE_RESET_WATI_MS);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout: %u\n", cnt);
- return -EBUSY;
- }
-
- return 0;
- }
-
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -3352,7 +3335,19 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
-static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
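
hclge_mailbox_service_task() can now be entered both from the service work and from the VF-reset sync loop below, so it uses two bits: MBX_SERVICE_SCHED is the pending-request flag, consumed with test_and_clear_bit(), and MBX_HANDLING is a test_and_set_bit() lock that keeps two callers out of the handler at once. The handoff reduced to its skeleton, extending the demo_dev sketch above with two more state bits (hypothetical demo_* names):

static void demo_mbx_service(struct demo_dev *dev)
{
        /* nothing pending, or the command queue is disabled */
        if (!test_and_clear_bit(DEMO_STATE_MBX_SCHED, &dev->state) ||
            test_bit(DEMO_STATE_CMD_DISABLE, &dev->state))
                return;

        /* another context is already draining the mailbox */
        if (test_and_set_bit(DEMO_STATE_MBX_HANDLING, &dev->state))
                return;

        demo_mbx_handler(dev);  /* hypothetical: process queued messages */

        clear_bit(DEMO_STATE_MBX_HANDLING, &dev->state);
}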
+
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
struct hclge_pf_rst_sync_cmd *req;
struct hclge_desc desc;
@@ -3363,26 +3358,28 @@ static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
do {
+ /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* for compatibility with old firmware, wait
* 100 ms for the VF to stop IO
*/
if (ret == -EOPNOTSUPP) {
msleep(HCLGE_RESET_SYNC_TIME);
- return 0;
+ return;
} else if (ret) {
- dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
- ret);
- return ret;
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
} else if (req->all_vf_ready) {
- return 0;
+ return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
hclge_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
- dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
- return -ETIME;
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
@@ -3462,12 +3459,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
- case HNAE3_FLR_RESET:
- dev_info(&pdev->dev, "FLR requested\n");
- /* schedule again to check later */
- set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
- break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -3483,10 +3474,15 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
- hclge_handle_hw_msix_error(hdev, addr);
+ if (hclge_handle_hw_msix_error(hdev, addr))
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+
clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
* interrupt, since it was not possible to do that in
@@ -3551,23 +3547,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
-static int hclge_reset_prepare_down(struct hclge_dev *hdev)
-{
- int ret = 0;
-
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- /* fall through */
- case HNAE3_FLR_RESET:
- ret = hclge_set_all_vf_rst(hdev, true);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
@@ -3581,6 +3560,19 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
u32 reg_val;
@@ -3588,10 +3580,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
@@ -3611,16 +3600,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
hclge_handle_imp_error(hdev);
@@ -3672,6 +3654,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_dbg_dump_rst_info(hdev);
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
return false;
}
@@ -3747,10 +3731,9 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
-static void hclge_reset(struct hclge_dev *hdev)
+static int hclge_reset_prepare(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- enum hnae3_reset_type reset_level;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3761,45 +3744,41 @@ static void hclge_reset(struct hclge_dev *hdev)
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
- goto err_reset;
-
- ret = hclge_reset_prepare_down(hdev);
- if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclge_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- if (hclge_reset_wait(hdev))
- goto err_reset;
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+ int ret;
hdev->rst_stats.hw_reset_done_cnt++;
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
if (ret)
- goto err_reset_lock;
+ return ret;
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
if (ret)
- goto err_reset_lock;
+ return ret;
- rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
@@ -3807,24 +3786,23 @@ static void hclge_reset(struct hclge_dev *hdev)
*/
if (ret &&
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
+ if (ret)
+ return ret;
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
* it should be handled as soon as possible. since some errors
@@ -3835,10 +3813,22 @@ static void hclge_reset(struct hclge_dev *hdev)
if (reset_level != HNAE3_NONE_RESET)
set_bit(reset_level, &hdev->reset_request);
+ return 0;
+}
+
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
+
return;
-err_reset_lock:
- rtnl_unlock();
err_reset:
if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
@@ -3939,34 +3929,18 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_service_task(struct work_struct *work)
+static void hclge_reset_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, rst_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hclge_reset_subtask(hdev);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-}
-
-static void hclge_mailbox_service_task(struct work_struct *work)
-{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, mbx_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
- return;
-
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
- hclge_mbx_handler(hdev);
-
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
@@ -3986,29 +3960,62 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
}
}
-static void hclge_service_task(struct work_struct *work)
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task.work);
+ unsigned long delta = round_jiffies_relative(HZ);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ /* Always handle link updating to make sure the link state is
+ * refreshed even when the update was triggered by mbx.
+ */
+ hclge_update_link_status(hdev);
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
- hclge_update_stats_for_all(hdev);
- hdev->hw_stats.stats_timer = 0;
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
- hclge_update_port_info(hdev);
- hclge_update_link_status(hdev);
+ hdev->serv_processed_cnt++;
hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
hclge_sync_vlan_filter(hdev);
- if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
hclge_rfs_filter_expire(hdev);
- hdev->fd_arfs_expire_timer = 0;
- }
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
+
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delayed the
+ * handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
}
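
The periodic task is now self-rearming and self-throttling: if it ran less than a second ago (tracked in last_serv_processed) it merely re-schedules itself for the remainder of the interval; otherwise it does the real work and stamps the time. Its timing skeleton, assuming the same jiffies bookkeeping as the hunk and reusing demo_task_schedule() from the earlier sketch (hypothetical demo_* names; last_processed is an assumed unsigned long field):

static void demo_periodic_task(struct demo_dev *dev)
{
        unsigned long delta = round_jiffies_relative(HZ);

        /* last run was less than 1s ago: only sleep the remainder */
        if (time_is_after_jiffies(dev->last_processed + HZ)) {
                delta = jiffies - dev->last_processed;
                if (delta < round_jiffies_relative(HZ)) {
                        delta = round_jiffies_relative(HZ) - delta;
                        goto out;
                }
        }

        demo_do_periodic_work(dev);     /* hypothetical payload */
        dev->last_processed = jiffies;
out:
        demo_task_schedule(dev, delta); /* re-arm for the next round */
}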
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4079,7 +4086,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "Get vector index fail. vector = %d\n", vector);
return vector_id;
}
@@ -4654,7 +4661,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "failed to get vector index. vector=%d\n", vector);
return vector_id;
}
@@ -6562,7 +6569,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
if (ret)
dev_err(&hdev->pdev->dev,
"serdes loopback config mac mode timeout\n");
@@ -6620,7 +6627,7 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
if (ret)
dev_err(&hdev->pdev->dev,
"phy loopback config mac mode timeout\n");
@@ -6734,6 +6741,19 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
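
hclge_flush_link_update() is the reader half of the LINK_UPDATING bit set in hclge_update_link_status() earlier in this patch: the updater brackets its work with test_and_set_bit()/clear_bit(), and the flusher polls until any in-flight update drains, bounded both by an iteration cap and by serv_processed_cnt advancing (which proves the service task is making progress anyway). The pairing in miniature (hypothetical demo_* names; state and serv_cnt are assumed fields):

static void demo_update_link(struct demo_dev *dev)
{
        if (test_and_set_bit(DEMO_STATE_LINK_UPDATING, &dev->state))
                return;         /* an update is already in flight */

        demo_report_link_state(dev);    /* hypothetical payload */

        clear_bit(DEMO_STATE_LINK_UPDATING, &dev->state);
}

static void demo_flush_link_update(struct demo_dev *dev)
{
        unsigned long last = dev->serv_cnt;
        int i = 0;

        while (test_bit(DEMO_STATE_LINK_UPDATING, &dev->state) &&
               i++ < 100000 && last == dev->serv_cnt)
                usleep_range(1, 1);     /* bounded busy-wait, as in the hunk */
}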
+
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6742,12 +6762,12 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
if (enable) {
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- /* Set the DOWN flag here to disable the service to be
- * scheduled again
- */
+ /* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- cancel_delayed_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
}
}
@@ -7483,7 +7503,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
@@ -7496,7 +7515,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
kfree(mac);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -8257,7 +8275,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -8265,7 +8282,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
@@ -8277,7 +8293,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
u16 state, vlan_id;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
@@ -8303,8 +8318,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
break;
}
}
-
- mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -9256,6 +9269,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
@@ -9269,38 +9283,57 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_FLR_WAIT_MS 100
-#define HCLGE_FLR_WAIT_CNT 50
- struct hclge_dev *hdev = ae_dev->priv;
- int cnt = 0;
+#define HCLGE_FLR_RETRY_WAIT_MS 500
+#define HCLGE_FLR_RETRY_CNT 5
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclge_reset_event(hdev->pdev, NULL);
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGE_FLR_WAIT_CNT)
- msleep(HCLGE_FLR_WAIT_MS);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclge_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+ /* disable misc vector before FLR done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -9342,21 +9375,17 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
- mutex_init(&hdev->vport_cfg_mutex);
spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
ret = hclge_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI init failed\n");
+ if (ret)
goto out;
- }
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+ if (ret)
goto err_pci_uninit;
- }
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -9364,11 +9393,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
+ if (ret)
goto err_cmd_uninit;
- }
ret = hclge_configure(hdev);
if (ret) {
@@ -9383,12 +9409,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Misc IRQ(vector0) init error, ret = %d.\n",
- ret);
+ if (ret)
goto err_msi_uninit;
- }
ret = hclge_alloc_tqps(hdev);
if (ret) {
@@ -9397,31 +9419,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_alloc_vport(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
ret = hclge_map_tqp(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
}
ret = hclge_init_umv_space(hdev);
- if (ret) {
- dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ if (ret)
goto err_mdiobus_unreg;
- }
ret = hclge_mac_init(hdev);
if (ret) {
@@ -9477,8 +9490,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Setup affinity after service timer setup because add_timer_on
* is called in affinity notify.
@@ -9512,6 +9523,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_mdiobus_unreg:
@@ -9534,7 +9547,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -9895,7 +9908,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
- mutex_destroy(&hdev->vport_cfg_mutex);
ae_dev->priv = NULL;
}
@@ -10157,10 +10169,8 @@ static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
int *bd_num_list,
u32 type_num)
{
-#define HCLGE_DFX_REG_BD_NUM 4
-
u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int ret;
ret = hclge_query_bd_num_cmd_send(hdev, desc);
@@ -10273,10 +10283,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ if (!desc_src)
return -ENOMEM;
- }
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@@ -10611,6 +10619,12 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algo);
return 0;
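
Moving off system_wq onto a driver-owned queue is not just namespacing: WQ_MEM_RECLAIM gives the queue a rescuer thread, so reset and mailbox work can still make forward progress under memory pressure, which matters for a NIC that may itself sit on the reclaim path. The lifetime pairing stripped to its core (a sketch; "demo" is a placeholder name):

static struct workqueue_struct *demo_wq;

static int __init demo_module_init(void)
{
        /* "%s" plus the trailing argument names the queue */
        demo_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "demo");
        if (!demo_wq)
                return -ENOMEM;
        return 0;
}

static void __exit demo_module_exit(void)
{
        destroy_workqueue(demo_wq);     /* drains pending work first */
}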
@@ -10619,6 +10633,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebb4c6e9aed3..f78cbb4cc85e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -139,6 +139,8 @@
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
+
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -208,13 +210,14 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_NIC_REGISTERED,
HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
- HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
+ HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -454,11 +457,7 @@ struct hclge_mac_stats {
u64 mac_rx_ctrl_pkt_num;
};
-#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
-struct hclge_hw_stats {
- struct hclge_mac_stats mac_stats;
- u32 stats_timer;
-};
+#define HCLGE_STATS_TIMER_INTERVAL 300UL
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
@@ -549,7 +548,7 @@ struct key_info {
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
-#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
@@ -712,7 +711,7 @@ struct hclge_dev {
struct hnae3_ae_dev *ae_dev;
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
- struct hclge_hw_stats hw_stats;
+ struct hclge_mac_stats mac_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -723,6 +722,7 @@ struct hclge_dev {
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
struct hclge_rst_stats rst_stats;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -774,8 +774,6 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list reset_timer;
struct delayed_work service_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -811,7 +809,8 @@ struct hclge_dev {
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
- u16 fd_arfs_expire_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -825,8 +824,6 @@ struct hclge_dev {
u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
- struct mutex vport_cfg_mutex; /* Protect stored vf table */
-
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0b433ebe6a2d..a3c0822191a9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -86,10 +86,12 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- enum hnae3_reset_type reset_type;
+ u16 reset_type;
u8 msg_data[2];
u8 dest_vfid;
+ BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+
dest_vfid = (u8)vport->vport_id;
if (hdev->reset_type == HNAE3_FUNC_RESET)
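
Sending reset_type as a u16 instead of the enum pins down the message layout, and the BUILD_BUG_ON() turns the narrowing assumption into a compile-time contract: if HNAE3_MAX_RESET ever grows past U16_MAX, the build breaks instead of the wire format silently truncating. The same guard in a self-contained sketch (demo_* names hypothetical):

#include <linux/kernel.h>
#include <linux/build_bug.h>
#include <linux/string.h>

enum demo_reset { DEMO_NONE_RESET, DEMO_FUNC_RESET, DEMO_MAX_RESET };

static void demo_pack_reset(u8 msg_data[2], enum demo_reset type)
{
        u16 wire = type;        /* the enum may be wider than the wire field... */

        /* ...so refuse to build if the enum outgrows u16 */
        BUILD_BUG_ON(DEMO_MAX_RESET > U16_MAX);

        memcpy(msg_data, &wire, sizeof(wire));
}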
@@ -635,7 +637,6 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
#define LINK_STATUS_OFFSET 1
#define LINK_FAIL_CODE_OFFSET 2
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
hclge_task_schedule(hdev, 0);
if (!req->msg[LINK_STATUS_OFFSET])
@@ -798,13 +799,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_link_mode(vport, req);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
- mutex_lock(&hdev->vport_cfg_mutex);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true);
- mutex_unlock(&hdev->vport_cfg_mutex);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
ret = hclge_get_vf_media_type(vport, req);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index af2245e3bb95..f38d236ebf4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -443,7 +443,7 @@ void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
hclgevf_cmd_uninit_regs(&hdev->hw);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 25d78a5aaa34..d6597206e692 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -16,6 +16,8 @@
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
+static struct workqueue_struct *hclgevf_wq;
+
static const struct pci_device_id ae_algovf_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
@@ -440,6 +442,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
struct hnae3_client *rclient;
struct hnae3_client *client;
+ if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
client = handle->client;
rclient = hdev->roce_client;
@@ -452,6 +457,8 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
rclient->ops->link_status_change(rhandle, !!link_state);
hdev->hw.mac.link = link_state;
}
+
+ clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}
static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
@@ -1309,14 +1316,13 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
-
/* when removing the hw vlan filter fails, record the vlan id, and
* try to remove it from hw later, to stay consistent
* with the stack.
*/
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1404,32 +1410,6 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
return ret;
}
-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
-}
-
-static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
- unsigned long delay_us,
- unsigned long wait_cnt)
-{
- unsigned long cnt = 0;
-
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < wait_cnt)
- usleep_range(delay_us, delay_us * 2);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1440,11 +1420,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- if (hdev->reset_type == HNAE3_FLR_RESET)
- return hclgevf_flr_poll_timeout(hdev,
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_CNT);
- else if (hdev->reset_type == HNAE3_VF_RESET)
+ if (hdev->reset_type == HNAE3_VF_RESET)
ret = readl_poll_timeout(hdev->hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
@@ -1516,7 +1492,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
- return 0;
+ /* bring up the nic to enable TX/RX again */
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1525,18 +1502,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
int ret = 0;
- switch (hdev->reset_type) {
- case HNAE3_VF_FUNC_RESET:
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
hdev->rst_stats.vf_func_rst_cnt++;
- break;
- case HNAE3_FLR_RESET:
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
- break;
- default:
- break;
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -1591,11 +1560,12 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
} else {
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
hclgevf_dump_rst_info(hdev);
}
}
-static int hclgevf_reset(struct hclgevf_dev *hdev)
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
@@ -1605,61 +1575,64 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
*/
ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
- rtnl_lock();
+ rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclgevf_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- /* check if VF could successfully fetch the hardware reset completion
- * status from the hardware
- */
- ret = hclgevf_reset_wait(hdev);
- if (ret) {
- /* can't do much in this situation, will disable VF */
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to fetch H/W reset completion status\n",
- ret);
- goto err_reset;
- }
+ return hclgevf_reset_prepare_wait(hdev);
+}
+
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
hdev->rst_stats.hw_rst_done_cnt++;
rtnl_lock();
-
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
+ rtnl_unlock();
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
- goto err_reset_lock;
+ return ret;
}
- /* bring up the nic to enable TX/RX again */
- ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
- rtnl_unlock();
-
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ return 0;
+}
+
+static void hclgevf_reset(struct hclgevf_dev *hdev)
+{
+ if (hclgevf_reset_prepare(hdev))
+ goto err_reset;
+
+ /* check if the VF can successfully fetch the reset completion
+ * status from the hardware
+ */
+ if (hclgevf_reset_wait(hdev)) {
+ /* can't do much in this situation, will disable VF */
+ dev_err(&hdev->pdev->dev,
+ "failed to fetch H/W reset completion status\n");
+ goto err_reset;
+ }
+
+ if (hclgevf_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
- return ret;
-err_reset_lock:
- rtnl_unlock();
err_reset:
hclgevf_reset_err_handle(hdev);
-
- return ret;
}
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
@@ -1722,25 +1695,60 @@ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
set_bit(rst_type, &hdev->default_reset_request);
}
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGEVF_FLR_WAIT_MS 100
-#define HCLGEVF_FLR_WAIT_CNT 50
+#define HCLGEVF_FLR_RETRY_WAIT_MS 500
+#define HCLGEVF_FLR_RETRY_CNT 5
+
struct hclgevf_dev *hdev = ae_dev->priv;
- int cnt = 0;
+ int retry_cnt = 0;
+ int ret;
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclgevf_reset_event(hdev->pdev, NULL);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclgevf_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGEVF_FLR_WAIT_CNT)
- msleep(HCLGEVF_FLR_WAIT_MS);
+ /* disable misc vector before FLR done */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ hdev->rst_stats.flr_rst_cnt++;
+}
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclgevf_reset_rebuild(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1767,62 +1775,37 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->rst_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->mbx_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
-static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay)
{
- if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
- !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
-static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
-{
- /* if we have any pending mailbox event then schedule the mbx task */
- if (hdev->mbx_event_pending)
- hclgevf_mbx_task_schedule(hdev);
-
- if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
- hclgevf_reset_task_schedule(hdev);
-}
-
-static void hclgevf_service_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies +
- HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
-
- hdev->stats_timer++;
- hclgevf_task_schedule(hdev);
-}
-
-static void hclgevf_reset_service_task(struct work_struct *work)
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
- struct hclgevf_dev *hdev =
- container_of(work, struct hclgevf_dev, rst_service_task);
- int ret;
-
- if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
@@ -1836,12 +1819,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hdev->last_reset_time = jiffies;
while ((hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET) {
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF stack reset failed %d.\n", ret);
- }
+ != HNAE3_NONE_RESET)
+ hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1882,42 +1861,29 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hclgevf_reset_task_schedule(hdev);
}
+ hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
-static void hclgevf_mailbox_service_task(struct work_struct *work)
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
-
- hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ return;
if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
hclgevf_mbx_async_handler(hdev);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
-static void hclgevf_keep_alive_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
-
- schedule_work(&hdev->keep_alive_task);
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-}
-
-static void hclgevf_keep_alive_task(struct work_struct *work)
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
u8 respmsg;
int ret;
- hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
-
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
@@ -1928,19 +1894,32 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
"VF sends keep alive cmd failed(=%d)\n", ret);
}
-static void hclgevf_service_task(struct work_struct *work)
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *handle;
- struct hclgevf_dev *hdev;
+ unsigned long delta = round_jiffies_relative(HZ);
+ struct hnae3_handle *handle = &hdev->nic;
- hdev = container_of(work, struct hclgevf_dev, service_task);
- handle = &hdev->nic;
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
- if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
- hclgevf_tqps_update_stats(handle);
- hdev->stats_timer = 0;
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
+ hdev->serv_processed_cnt++;
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+ hclgevf_keep_alive(hdev);
+
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+ hclgevf_tqps_update_stats(handle);
+
/* request the link status from the PF. PF would be able to tell VF
* about such updates in the future, so we might remove this later
*/
@@ -1950,9 +1929,27 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_sync_vlan_filter(hdev);
- hclgevf_deferred_task_schedule(hdev);
+ hdev->last_serv_processed = jiffies;
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+out:
+ hclgevf_task_schedule(hdev, delta);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+ service_task.work);
+
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+ hclgevf_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delayed the
+ * handling by calling hclgevf_task_schedule() in
+ * hclgevf_periodic_service_task().
+ */
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
}
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
@@ -2010,11 +2007,6 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
-static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
-{
- writel(en ? 1 : 0, vector->addr);
-}
-
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
enum hclgevf_evt_cause event_cause;
@@ -2189,16 +2181,31 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
false);
}
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclgevf_task_schedule(hdev, 0);
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclgevf_flush_link_update(hdev);
}
}
@@ -2245,16 +2252,12 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
ret = hclgevf_set_alive(handle, true);
if (ret)
return ret;
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-
return 0;
}
@@ -2267,27 +2270,18 @@ static void hclgevf_client_stop(struct hnae3_handle *handle)
if (ret)
dev_warn(&hdev->pdev->dev,
"%s failed %d\n", __func__, ret);
-
- del_timer_sync(&hdev->keep_alive_timer);
- cancel_work_sync(&hdev->keep_alive_task);
}
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
- /* setup tasks for the MBX */
- INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
- /* setup tasks for service timer */
- timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
-
- INIT_WORK(&hdev->service_task, hclgevf_service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
-
- INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
mutex_init(&hdev->mbx_resp.mbx_mutex);
+ sema_init(&hdev->reset_sem, 1);
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
@@ -2298,18 +2292,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
- if (hdev->keep_alive_timer.function)
- del_timer_sync(&hdev->keep_alive_timer);
- if (hdev->keep_alive_task.func)
- cancel_work_sync(&hdev->keep_alive_task);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
@@ -2383,8 +2367,10 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
hclgevf_get_misc_vector(hdev);
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGEVF_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
- 0, "hclgevf_cmd", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
hdev->misc_vector.vector_irq);
@@ -2611,11 +2597,11 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
HCLGEVF_MSIX_OFT_ROCEE_S);
hdev->num_roce_msix =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
/* the number of NIC MSI-X vectors always equals RoCE's. */
@@ -2628,7 +2614,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
hdev->num_nic_msix = hdev->num_msi;
@@ -2725,16 +2711,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
int ret;
ret = hclgevf_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI initialization failed\n");
+ if (ret)
return ret;
- }
ret = hclgevf_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ if (ret)
goto err_cmd_queue_init;
- }
ret = hclgevf_cmd_init(hdev);
if (ret)
@@ -2742,11 +2724,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
/* Get vf resource */
ret = hclgevf_query_vf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query vf status error, ret = %d.\n", ret);
+ if (ret)
goto err_cmd_init;
- }
ret = hclgevf_init_msi(hdev);
if (ret) {
@@ -2756,13 +2735,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ hdev->reset_type = HNAE3_NONE_RESET;
ret = hclgevf_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
- ret);
+ if (ret)
goto err_misc_irq_init;
- }
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
@@ -2779,10 +2756,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
ret = hclgevf_set_handle_info(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ if (ret)
goto err_config;
- }
ret = hclgevf_config_gro(hdev, true);
if (ret)
@@ -2807,6 +2782,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_config:
@@ -2838,7 +2815,6 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2853,10 +2829,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hdev = ae_dev->priv;
- timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
- INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
-
return 0;
}
@@ -3213,6 +3185,12 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+ if (!hclgevf_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algovf);
return 0;
@@ -3221,6 +3199,7 @@ static int hclgevf_init(void)
static void hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
+ destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
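Two easy-to-miss details in the init/exit hunks above: alloc_workqueue() takes a printf-style format (the "%s" is filled in by HCLGEVF_NAME), a max_active of 0 selects the default concurrency limit, and WQ_MEM_RECLAIM gives the queue a rescuer thread so the service task can still make progress while the system is reclaiming memory. A stripped-down version of the same lifecycle, names hypothetical:

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* "%s" plus the trailing argument form the queue name */
            example_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "example");
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);  /* drains pending work first */
    }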
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2f4c81bf4169..fee8d97f323c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -142,12 +142,13 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
- HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
+ HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_RST_FAIL,
};
struct hclgevf_mac {
@@ -220,6 +221,7 @@ struct hclgevf_rss_cfg {
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclgevf_rst_stats {
@@ -251,6 +253,7 @@ struct hclgevf_dev {
unsigned long reset_state; /* requested, pending */
struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_tqps; /* num task queue pairs of this PF */
@@ -283,12 +286,7 @@ struct hclgevf_dev {
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
- struct timer_list service_timer;
- struct timer_list keep_alive_timer;
- struct work_struct service_task;
- struct work_struct keep_alive_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
+ struct delayed_work service_task;
struct hclgevf_tqp *htqp;
@@ -298,7 +296,8 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
- u32 stats_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2411ad270c98..02a14f5e7fe3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -766,7 +766,7 @@ static void hinic_set_rx_mode(struct net_device *netdev)
queue_work(nic_dev->workq, &rx_mode_work->work);
}
-static void hinic_tx_timeout(struct net_device *netdev)
+static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
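This hunk is the first of many identical ones below: the series changes the ndo_tx_timeout prototype tree-wide to void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue), so the core passes down the index of the queue it found hung instead of each driver re-scanning trans_start itself (the i40e hunk below shows exactly that scan being deleted). A minimal handler under the new contract, purely illustrative:

    static void example_tx_timeout(struct net_device *dev,
                                   unsigned int txqueue)
    {
            /* txqueue is the hung queue index detected by the stack */
            netdev_warn(dev, "TX queue %u timed out\n", txqueue);
            /* driver-specific recovery (reset task, ring dump, ...) */
    }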
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 92929750f832..bef676d93339 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -363,7 +363,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
@@ -1019,7 +1019,7 @@ err_irq_dev:
return res;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index bb3b8adbe4f0..a0bfb509e002 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -66,7 +66,7 @@ static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
-static void ether1_timeout(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev, unsigned int txqueue);
/* ------------------------------------------------------------------------- */
@@ -650,7 +650,7 @@ ether1_open (struct net_device *dev)
}
static void
-ether1_timeout(struct net_device *dev)
+ether1_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f9742af7f142..b03757e169e4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -351,7 +351,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
@@ -936,7 +936,7 @@ out_remove_rx_bufs:
return -EAGAIN;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6436a98c5953..22f5887578b2 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev)
idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
if (!res || !ca || !options || !idprom)
return -ENODEV;
- mpu_addr = ioremap_nocache(res->start, 4);
+ mpu_addr = ioremap(res->start, 4);
if (!mpu_addr)
return -ENOMEM;
- ca_addr = ioremap_nocache(ca->start, 4);
+ ca_addr = ioremap(ca->start, 4);
if (!ca_addr)
goto probe_failed_free_mpu;
@@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->base_addr = res->start;
netdevice->irq = platform_get_irq(dev, 0);
- eth_addr = ioremap_nocache(idprom->start, 0x10);
+ eth_addr = ioremap(idprom->start, 0x10);
if (!eth_addr)
goto probe_failed;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 1a86184d44c0..4564ee02c95f 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -125,7 +125,7 @@ static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void sun3_82586_timeout(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue);
#if 0
static void sun3_82586_dump(struct net_device *,void *);
#endif
@@ -965,7 +965,7 @@ static void startrecv586(struct net_device *dev)
WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
}
-static void sun3_82586_timeout(struct net_device *dev)
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
struct priv *p = netdev_priv(dev);
#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 13e30eba5349..0273fb7a9d01 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2786,7 +2786,7 @@ out:
return;
}
-static void ehea_tx_watchdog(struct net_device *dev)
+static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct ehea_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 2e40425d8a34..b7fc17756c51 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -776,7 +776,7 @@ static void emac_reset_work(struct work_struct *work)
mutex_unlock(&dev->link_lock);
}
-static void emac_tx_timeout(struct net_device *ndev)
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_instance *dev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 830791ab4619..c75239d8820f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2282,7 +2282,7 @@ err:
return -ret;
}
-static void ibmvnic_tx_timeout(struct net_device *dev)
+static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a65d5a9ba7db..1b8d015ebfb0 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2316,7 +2316,7 @@ static void e100_down(struct nic *nic)
e100_rx_clean_list(nic);
}
-static void e100_tx_timeout(struct net_device *netdev)
+static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nic *nic = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index aca97b084003..2bced34c19ba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -134,7 +134,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
-static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
@@ -3488,7 +3488,7 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6c51b1bad8c4..37a2314d3e6b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -185,13 +185,12 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
+ struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct delayed_work watchdog_task;
-
- struct workqueue_struct *e1000_workqueue;
+ struct work_struct watchdog_task;
const struct e1000_info *ei;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fe7997c18a10..db4ea58bac82 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@@ -4723,7 +4721,7 @@ int e1000e_close(struct net_device *netdev)
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
}
napi_disable(&adapter->napi);
@@ -5073,12 +5071,13 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name, adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
- (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
- (ctrl & E1000_CTRL_RFCE) ? "Rx" :
- (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+ netdev_info(adapter->netdev,
+ "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -5155,11 +5154,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to our adapter's watchdog timer_list
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task.work);
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -5307,7 +5320,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5407,9 +5420,8 @@ link_up:
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- queue_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task,
- round_jiffies(2 * HZ));
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -5929,7 +5941,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7449,21 +7461,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- e1000e_driver_name);
-
- if (!adapter->e1000_workqueue) {
- err = -ENOMEM;
- goto err_workqueue;
- }
-
- INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
- 0);
-
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7557,9 +7559,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@@ -7604,17 +7603,15 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
- cancel_delayed_work(&adapter->watchdog_task);
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
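The remove path above encodes an ordering the revert has to preserve: mark the adapter down first so interrupt handlers and the watchdog stop re-arming anything, then kill the timer (the producer that schedules watchdog_task), and only then cancel the work items (the consumers). Condensed from the lines above:

    set_bit(__E1000_DOWN, &adapter->state);    /* stop new scheduling   */
    del_timer_sync(&adapter->watchdog_timer);  /* no more schedule_work */
    cancel_work_sync(&adapter->watchdog_task); /* wait out the consumer */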
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 68baee04dc58..0637ccadee79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
/**
* fm10k_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: the index of the Tx queue that timed out
**/
-static void fm10k_tx_timeout(struct net_device *netdev)
+static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_ring *tx_ring;
bool real_tx_hang = false;
- int i;
-
-#define TX_TIMEO_LIMIT 16000
- for (i = 0; i < interface->num_tx_queues; i++) {
- struct fm10k_ring *tx_ring = interface->tx_ring[i];
- if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
- real_tx_hang = true;
+ if (txqueue >= interface->num_tx_queues) {
+ WARN(1, "invalid Tx queue index %u", txqueue);
+ return;
}
+ tx_ring = interface->tx_ring[txqueue];
+ if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
+ real_tx_hang = true;
+
+#define TX_TIMEO_LIMIT 16000
if (real_tx_hang) {
fm10k_tx_timeout_reset(interface);
} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9f0a4e92a231..37514a75f928 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
/* fall through */
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d4055037af89..45b90eb11adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- hw_dbg(hw, "Buffer to small for PBA data.\n");
+ hw_dbg(hw, "Buffer too small for PBA data.\n");
return I40E_ERR_PARAM;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2c5af6d4a6b1..8c3e753bfb9d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -301,43 +301,24 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-static void i40e_tx_timeout(struct net_device *netdev)
+static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *tx_ring = NULL;
- unsigned int i, hung_queue = 0;
+ unsigned int i;
u32 head, val;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
- unsigned long trans_start;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues) {
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* with txqueue index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (txqueue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
}
}
}
@@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev)
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
- vsi->seid, hung_queue, tx_ring->next_to_clean,
+ vsi->seid, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use,
readl(tx_ring->tail), val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6a3f0fc56c3b..69523ac85639 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
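Why the helper compares against BIT(I40E_MAX_VF_QUEUES): rx_queues and tx_queues are queue bitmaps, not counts. The old disable-path test treated them as counts, so (assuming I40E_MAX_VF_QUEUES is 16, as in the driver header) a request selecting only a high queue index was wrongly refused:

    /* vqs->tx_queues = BIT(15): queue 15 only, a legal selection */
    BIT(15) > 16        /* old test: true  -> rejected (the bug) */
    BIT(15) >= BIT(16)  /* new test: false -> accepted           */

    /* vqs->tx_queues = BIT(16): queue 16 does not exist */
    BIT(16) >= BIT(16)  /* new test: true  -> rejected           */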
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index f73cd917c44f..42058fad6a3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -269,7 +269,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -306,7 +306,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 29de3ae96ef2..bd1b1ed323f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 821987da5698..62fe56ddcb6e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -159,7 +159,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void iavf_tx_timeout(struct net_device *netdev)
+static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
@@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2181,6 +2180,16 @@ continue_reset:
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete the filter for the current MAC address; it could have
+ * been changed by the PF via an administratively set MAC.
+ * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index c46770eba320..1ab9cb339acb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 7cb829132d28..59544b0fc086 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,7 +17,8 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
- ice_flex_pipe.o \
+ ice_flex_pipe.o \
+ ice_flow.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index f972dce8aebb..cb10abb14e11 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -174,6 +174,8 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
+ struct ice_vsi *dflt_vsi; /* default VSI for this switch */
+ u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_state {
@@ -275,6 +277,7 @@ struct ice_vsi {
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
u8 vlan_ena:1;
+ u16 num_vlan;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -462,12 +465,13 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
{
struct xdp_umem **umems = ring->vsi->xsk_umems;
- int qid = ring->q_index;
+ u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ !ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
return umems[qid];
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5421fc413f94..4459bc564b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -232,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+
+#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
+#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+
+#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
@@ -1849,6 +1856,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 77d6a0291e97..d8e975cceb21 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -93,7 +93,8 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector and set its default ITR values. If
+ * allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -108,6 +109,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
+ q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
+ q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
if (vsi->type == ICE_VSI_VF)
goto out;
/* only set affinity_mask if the CPU is online */
@@ -299,6 +302,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
@@ -323,7 +327,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
+ ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index);
@@ -674,10 +680,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
@@ -688,10 +690,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index fb1d930470c7..0207e28c2682 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4,28 +4,10 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
+#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
- ((ICE_RX_OPC_MDID << \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
- (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-
-#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
- (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
- (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
- (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
- (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
-
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -348,88 +330,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_flags
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize Rx flex flags
- */
-static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- u8 idx = 0;
-
- /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
- * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
- * flexiflags1[3:0] - Not used for flag programming
- * flexiflags2[7:0] - Tunnel and VLAN types
- * 2 invalid fields in last index
- */
- switch (prof_id) {
- /* Rx flex flags are currently programmed for the NIC profiles only.
- * Different flag bit programming configurations can be added per
- * profile as needed.
- */
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
- ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
- ICE_FLG_FIN, idx++);
- /* flex flag 1 is not used for flexi-flag programming, skipping
- * these four FLG64 bits.
- */
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
- ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
- ICE_FLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
- ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
- ICE_FLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Flag programming for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
- * ice_init_flex_flds
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize flex descriptors
- */
-static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- enum ice_flex_rx_mdid mdid;
-
- switch (prof_id) {
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
-
- mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
- ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
-
- ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
-
- ice_init_flex_flags(hw, prof_id);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Field init for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
@@ -882,9 +782,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
-
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
@@ -1601,6 +1498,114 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the HW struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = cpu_to_le16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @btm: allocate from bottom
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to allocate resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
+ if (btm)
+ buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto ice_alloc_res_exit;
+
+ memcpy(res, buf->elem, sizeof(buf->elem) * num);
+
+ice_alloc_res_exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated HW resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type);
+ memcpy(buf->elem, res, sizeof(buf->elem) * num);
+
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ kfree(buf);
+ return status;
+}
+
+/**
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the HW structure
* @max: value to be evenly split between each PF
@@ -3510,7 +3515,10 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
if (status)
return status;
}
-
+ /* Replay per VSI all RSS configurations */
+ status = ice_replay_rss_cfg(hw, vsi_handle);
+ if (status)
+ return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
return status;
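A usage note for the allocator pair added earlier in this file: callers pass a resource type (the btm argument ORs in the scan-bottom flag) and receive hardware resource IDs; buf_len is struct_size(buf, elem, num - 1) because the element array inside ice_aqc_alloc_free_res_elem already has room for one entry. A hypothetical caller, not part of this patch:

    u16 prof_id;
    enum ice_status status;

    /* take one hash-profile-builder profile ID from the bottom of the
     * range, then hand it back */
    status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID,
                              1, true /* btm */, &prof_id);
    if (status)
            return status;
    /* ... program the profile ... */
    status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID,
                             1, &prof_id);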
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b22aa561e253..b5c013fdaaf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -34,10 +34,18 @@ enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index d3d3ec29def9..0664e5b8d130 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -396,6 +396,12 @@ dcb_error:
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ /* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
+ * here as is done for other calls to that function. That check is
+ * not necessary since this is in this function's error cleanup path.
+ * Suppress the Coverity warning with the following comment...
+ */
+ /* coverity[check_return] */
ice_pf_dcb_cfg(pf, prev_cfg, false);
kfree(prev_cfg);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index f8d5c661d0ba..ce63017c56c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -11,5 +11,23 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Connection E822-C for backplane */
+#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection E822-C for QSFP */
+#define ICE_DEV_ID_E822C_QSFP 0x1891
+/* Intel(R) Ethernet Connection E822-C for SFP */
+#define ICE_DEV_ID_E822C_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-X for backplane */
+#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9ebd93e79aeb..90c6a3ca20c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,7 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -283,12 +284,15 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return true;
+ }
+
return false;
}
@@ -2531,6 +2535,243 @@ done:
}
/**
+ * ice_parse_hdrs - parses headers from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * header types for RSS configuration
+ */
+static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+{
+ u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+ return hdrs;
+}
+
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/**
+ * ice_parse_hash_flds - parses hash fields from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * hash fields for RSS configuration
+ */
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+{
+ u64 hfld = ICE_HASH_INVALID;
+
+ if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
+/**
+ * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ */
+static int
+ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ u64 hashed_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hashed_flds = ice_parse_hash_flds(nfc);
+ if (hashed_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ if (status) {
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ */
+static void
+ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u64 hash_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+
+ nfc->data = 0;
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ if (hash_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
+ nfc->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
+ nfc->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
+ nfc->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
+ nfc->data |= (u64)RXH_L4_B_2_3;
+}
+
+/**
+ * ice_set_rxnfc - command to set Rx flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ return ice_set_rss_hash_opt(vsi, cmd);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -2551,6 +2792,10 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXFH:
+ ice_get_rss_hash_opt(vsi, cmd);
+ ret = 0;
+ break;
default:
break;
}
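To make the new ETHTOOL_SRXFH/ETHTOOL_GRXFH plumbing concrete: a request such as "ethtool -N eth0 rx-flow-hash tcp4 sdfn" arrives with nfc->flow_type = TCP_V4_FLOW and nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the two parsers above reduce to:

    u32 hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
    u64 hfld = ICE_FLOW_HASH_FLD_IPV4_SA | ICE_FLOW_HASH_FLD_IPV4_DA |
               ICE_FLOW_HASH_FLD_TCP_SRC_PORT |
               ICE_FLOW_HASH_FLD_TCP_DST_PORT;

ice_set_rss_hash_opt() then hands these to ice_add_rss_cfg(); ice_get_rss_hash_opt() performs the inverse mapping on the read side.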
@@ -3585,6 +3830,53 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
+ * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure containing the coalesce settings to check
+ *
+ * Print a netdev_info message and return an error if the driver does not
+ * support one of the parameters. Once a parameter is implemented, remove
+ * only that parameter from the param array.
+ */
+static int
+ice_is_coalesce_param_invalid(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ice_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
+ {ec->rate_sample_interval, "sample-interval"},
+ {ec->pkt_rate_low, "pkt-rate-low"},
+ {ec->pkt_rate_high, "pkt-rate-high"},
+ {ec->rx_max_coalesced_frames, "rx-frames"},
+ {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
+ {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
+ {ec->tx_max_coalesced_frames, "tx-frames"},
+ {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
+ {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
+ {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
+ {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
+ {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
+ {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
+ {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
+ {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev, "Setting %s not supported\n",
+ param[i].name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
* __ice_set_coalesce - set ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
@@ -3600,6 +3892,9 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ if (ice_is_coalesce_param_invalid(netdev, ec))
+ return -EINVAL;
+
if (q_num < 0) {
int v_idx;
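The effect of the new guard, illustrated with a hypothetical request: any field the driver has not implemented rejects the whole "ethtool -C" call before per-queue ITR values are touched.

    struct ethtool_coalesce ec = {
            .rx_coalesce_usecs = 50,        /* supported ITR knob */
            .rx_max_coalesced_frames = 32,  /* "rx-frames": unsupported */
    };

    /* ice_is_coalesce_param_invalid() logs
     * "Setting rx-frames not supported" and returns -EINVAL, so
     * __ice_set_coalesce() bails out before touching any queue. */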
@@ -3804,6 +4099,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
+ .set_rxnfc = ice_set_rxnfc,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index cbd53b586c36..99208946224c 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3,6 +3,87 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
+#include "ice_flow.h"
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+ /* SWITCH */
+ {
+ ICE_SID_XLT0_SW,
+ ICE_SID_XLT_KEY_BUILDER_SW,
+ ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW,
+ ICE_SID_CDID_KEY_BUILDER_SW,
+ ICE_SID_CDID_REDIR_SW
+ },
+
+ /* ACL */
+ {
+ ICE_SID_XLT0_ACL,
+ ICE_SID_XLT_KEY_BUILDER_ACL,
+ ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL,
+ ICE_SID_CDID_KEY_BUILDER_ACL,
+ ICE_SID_CDID_REDIR_ACL
+ },
+
+ /* FD */
+ {
+ ICE_SID_XLT0_FD,
+ ICE_SID_XLT_KEY_BUILDER_FD,
+ ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD,
+ ICE_SID_CDID_KEY_BUILDER_FD,
+ ICE_SID_CDID_REDIR_FD
+ },
+
+ /* RSS */
+ {
+ ICE_SID_XLT0_RSS,
+ ICE_SID_XLT_KEY_BUILDER_RSS,
+ ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS,
+ ICE_SID_CDID_KEY_BUILDER_RSS,
+ ICE_SID_CDID_REDIR_RSS
+ },
+
+ /* PE */
+ {
+ ICE_SID_XLT0_PE,
+ ICE_SID_XLT_KEY_BUILDER_PE,
+ ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE,
+ ICE_SID_CDID_KEY_BUILDER_PE,
+ ICE_SID_CDID_REDIR_PE
+ }
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+ return ice_sect_lkup[blk][sect];
+}
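+
+/* For example, ice_sect_id(ICE_BLK_RSS, ICE_XLT2) returns ICE_SID_XLT2_RSS,
+ * the section ID used when building an XLT2 update section for the RSS
+ * block (assuming enum ice_sect follows the column order of the table
+ * above).
+ */
+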
/**
* ice_pkg_val_buf
@@ -158,6 +239,176 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/* Key creation */
+
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
+
+/**
+ * ice_gen_key_word - generate 16 bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to where the resulting key portion will be stored
+ * @key_inv: pointer to where the resulting key invert portion will be stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask and an 8-bit never match mask. The 16 bits of output are divided into
+ * 8 bits of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
+ *
+ * Input:
+ * val: b0 1 0 1 0 1
+ * dont_care: b0 0 1 1 0 0
+ * nvr_mtch: b0 0 0 0 1 1
+ * ------------------------------
+ * Result: key: b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
+{
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
+
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
+
+ *key = 0;
+ *key_inv = 0;
+
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
+
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
+
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
+
+ return 0;
+}
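+
+/* Editor's worked example (not part of this patch): encoding val = 0x05 with
+ * all bits valid and bits 2-3 marked don't care:
+ *
+ *	u8 key = 0, key_inv = 0;
+ *
+ *	ice_gen_key_word(0x05, 0xff, 0x0c, 0x00, &key, &key_inv);
+ *
+ * yields key = 0xFE and key_inv = 0x0D: bit 0 encodes "match 1" (key 0,
+ * invert 1), bit 1 and bits 4-7 encode "match 0" (key 1, invert 0), and
+ * bits 2-3 encode don't care (key 1, invert 1).
+ */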
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an array.
+ * Returns true if the number of bits set is <= max, false otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+ u16 count = 0;
+ u16 i;
+
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
+
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we already have found 'max' number of
+ * bits set, then we can return failure now.
+ */
+ if (count == max)
+ return false;
+
+ /* count the bits in this byte, checking threshold */
+ count += hweight8(mask[i]);
+ if (count > max)
+ return false;
+ }
+
+ return true;
+}
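+
+/* For instance, with max = 1 a mask of { 0x01, 0x00, 0x00, 0x00, 0x00 }
+ * passes (one bit set) while { 0x01, 0x02, 0x00, 0x00, 0x00 } fails (two
+ * bits set).
+ */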
+
+/**
+ * ice_set_key - generate a variable-sized key in multiples of 16 bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+static enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
+{
+ u16 half_size;
+ u16 i;
+
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+
+ half_size = size / 2;
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
+
+ /* Make sure at most one bit is set in the never match mask. Having more
+ * than one never match mask bit set will cause HW to consume excessive
+ * power; this is a power management efficiency check.
+ */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
+
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
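+
+/* Editor's sketch (not part of this patch): the first half of 'key' receives
+ * the key bytes and the second half the key-invert bytes, so an exact-match
+ * key over a 5-byte value could be built as:
+ *
+ *	u8 key[ICE_TCAM_KEY_SZ] = { 0 };
+ *	u8 val[ICE_TCAM_KEY_VAL_SZ] = { 0xAB, 0x00, 0x00, 0x00, 0x00 };
+ *
+ *	ice_set_key(key, ICE_TCAM_KEY_SZ, val, NULL, NULL, NULL, 0,
+ *		    ICE_TCAM_KEY_SZ / 2);
+ */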
+
/**
* ice_acquire_global_cfg_lock
* @hw: pointer to the HW structure
@@ -205,6 +456,31 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
}
/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+static enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+static void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
* ice_aq_download_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package buffer to transfer
@@ -253,6 +529,54 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
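+	/* the update opcode reuses the download package descriptor layout */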
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
* ice_find_seg_in_pkg
* @hw: pointer to the hardware structure
* @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
@@ -287,6 +611,44 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ u32 offset, info, i;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -767,6 +1129,169 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
return status;
}
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before
+ * ice_pkg_buf_alloc_section() is called. Once ice_pkg_buf_alloc_section() has
+ * been called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but it will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+static enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
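+
+/* Editor's sketch (not part of this patch): a typical build-and-update
+ * sequence using the helpers above, mirroring ice_upd_prof_hw() later in
+ * this file; 'id' and 'size' are placeholder section parameters:
+ *
+ *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
+ *
+ *	if (bld && !ice_pkg_buf_reserve_section(bld, 1)) {
+ *		void *sect = ice_pkg_buf_alloc_section(bld, id, size);
+ *
+ *		(fill in 'sect' here, in Little Endian form, then:)
+ *		if (sect && ice_pkg_buf_get_active_sections(bld))
+ *			ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *	}
+ *	ice_pkg_buf_free(hw, bld);
+ */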
+
/* PTG Management */
/**
@@ -951,6 +1476,48 @@ enum ice_sid_all {
ICE_SID_OFF_COUNT,
};
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * The count, the cookies, and their order must all match for the two lists
+ * to be considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
+{
+ struct ice_vsig_prof *tmp1;
+ struct ice_vsig_prof *tmp2;
+ u16 chk_count = 0;
+ u16 count = 0;
+
+ /* compare counts */
+ list_for_each_entry(tmp1, list1, list)
+ count++;
+ list_for_each_entry(tmp2, list2, list)
+ chk_count++;
+ if (!count || count != chk_count)
+ return false;
+
+ tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
+ tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
+
+	/* profile cookies must match, and in the exact same order, to take
+	 * priority into account
+	 */
+ while (count--) {
+ if (tmp2->profile_cookie != tmp1->profile_cookie)
+ return false;
+
+ tmp1 = list_next_entry(tmp1, list);
+ tmp2 = list_next_entry(tmp2, list);
+ }
+
+ return true;
+}
+
/* VSIG Management */
/**
@@ -999,6 +1566,117 @@ static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
}
/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will iterate through the VSIG list, mark the first unused
+ * entry as used for the new VSIG and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ return ice_vsig_alloc_val(hw, blk, i);
+
+ return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find VSI group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e., all VSIs under
+ * a group have the same characteristic set. To check if there exists a VSIG
+ * which has the same characteristics as the input characteristics, this
+ * function will iterate through the XLT2 list and return the VSIG that has a
+ * matching configuration. To make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+static enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chs, u16 *vsig)
+{
+ struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+ u16 i;
+
+ for (i = 0; i < xlt2->count; i++)
+ if (xlt2->vsig_tbl[i].in_use &&
+ ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+ *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to remove
+ *
+ * This function will remove all VSIs associated with the input VSIG, move
+ * them to the default VSIG, and mark the VSIG as available.
+ */
+static enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ struct ice_vsig_prof *dtmp, *del;
+ struct ice_vsig_vsi *vsi_cur;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+ if (idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the
+ * list and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+		/* remove all VSIs associated with this VSIG XLT2 entry */
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ /* NULL terminate head of VSI list */
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+ }
+
+ /* free characteristic list */
+ list_for_each_entry_safe(del, dtmp,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+	/* if the VSIG characteristic list was cleared for reset,
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return 0;
+}
+
+/**
* ice_vsig_remove_vsi - remove VSI from VSIG
* @hw: pointer to the hardware structure
* @blk: HW block
@@ -1117,6 +1795,215 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
return 0;
}
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u8 *prof_id)
+{
+ struct ice_es *es = &hw->blk[blk].es;
+ u16 off, i;
+
+ for (i = 0; i < es->count; i++) {
+ off = i * es->fvw;
+
+ if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ continue;
+
+ *prof_id = i;
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the TCAM for
+ * @tcam_idx: pointer to variable to receive the TCAM entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the TCAM entry
+ * @tcam_idx: the TCAM entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+ enum ice_status status;
+ u16 res_type;
+ u16 get_prof;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+ if (!status)
+ *prof_id = (u8)get_prof;
+
+ return status;
+}
+
+/**
+ * ice_free_prof_id - free profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID to free
+ *
+ * This function frees a profile ID, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ u16 tmp_prof_id = (u16)prof_id;
+ u16 res_type;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ hw->blk[blk].es.ref_count[prof_id]++;
+
+ return 0;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+ struct ice_fv_word *fv)
+{
+ u16 off;
+
+ off = prof_id * hw->blk[blk].es.fvw;
+ if (!fv) {
+ memset(&hw->blk[blk].es.t[off], 0,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ hw->blk[blk].es.written[prof_id] = false;
+ } else {
+ memcpy(&hw->blk[blk].es.t[off], fv,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+ if (!--hw->blk[blk].es.ref_count[prof_id]) {
+ ice_write_es(hw, blk, prof_id, NULL);
+ return ice_free_prof_id(hw, blk, prof_id);
+ }
+ }
+
+ return 0;
+}
+
/* Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
/* SWITCH */
@@ -1374,16 +2261,85 @@ void ice_fill_blk_tbls(struct ice_hw *hw)
}
/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ mutex_lock(&es->prof_map_lock);
+ list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ mutex_unlock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_flow_prof *p, *tmp;
+
+ mutex_lock(&hw->fl_profs_locks[blk_idx]);
+ list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ list_del(&p->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk_idx]);
+
+	/* if the driver is in reset and tables are being cleared,
+ * re-initialize the flow profile list heads
+ */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl)
+ return;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ ice_vsig_free(hw, blk, i);
+}
+
+/**
* ice_free_hw_tbls - free hardware table memory
* @hw: pointer to the hardware structure
*/
void ice_free_hw_tbls(struct ice_hw *hw)
{
+ struct ice_rss_cfg *r, *rt;
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
- hw->blk[i].is_list_init = false;
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+
+ ice_free_prof_map(hw, i);
+ mutex_destroy(&es->prof_map_lock);
+ ice_free_flow_profs(hw, i);
+ mutex_destroy(&hw->fl_profs_locks[i]);
+
+ hw->blk[i].is_list_init = false;
+ }
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
@@ -1397,10 +2353,26 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
}
+ list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_destroy(&hw->rss_locks);
memset(hw->blk, 0, sizeof(hw->blk));
}
/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ mutex_init(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
*/
@@ -1415,6 +2387,13 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
memset(xlt1->ptg_tbl, 0,
ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
@@ -1443,6 +2422,8 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
+ mutex_init(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -1454,6 +2435,9 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
if (hw->blk[i].is_list_init)
continue;
+ ice_init_flow_profs(hw, i);
+ mutex_init(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
hw->blk[i].is_list_init = true;
hw->blk[i].overwrite = blk_sizes[i].overwrite;
@@ -1547,3 +2531,1580 @@ err:
ice_free_hw_tbls(hw);
return ICE_ERR_NO_MEMORY;
}
+
+/**
+ * ice_prof_gen_key - generate profile ID key
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write profile ID to
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile ID key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+ u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 key[ICE_TCAM_KEY_SZ])
+{
+ struct ice_prof_id_key inkey;
+
+ inkey.xlt1 = ptg;
+ inkey.xlt2_cdid = cpu_to_le16(vsig);
+ inkey.flags = cpu_to_le16(flags);
+
+ switch (hw->blk[blk].prof.cdid_bits) {
+ case 0:
+ break;
+ case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
+ break;
+ case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
+ break;
+ case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 16
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+ break;
+ }
+
+ return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+ nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
+
+/**
+ * ice_tcam_write_entry - write TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write profile ID to
+ * @idx: the entry index to write to
+ * @prof_id: profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+ u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+ enum ice_status status;
+
+ status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+ dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+ if (!status) {
+ hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
+ hw->blk[blk].prof.t[idx].prof_id = prof_id;
+ }
+
+ return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *ptr;
+
+ *refs = 0;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ while (ptr) {
+ (*refs)++;
+ ptr = ptr->next_vsi;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *ent;
+
+ list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (ent->profile_cookie == hdl)
+ return true;
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "Characteristic list for VSI group %d not found.\n",
+ vsig);
+ return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile ID extraction sequence changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+ u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+ struct ice_pkg_es *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_VEC_TBL);
+ p = (struct ice_pkg_es *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ vec_size -
+ sizeof(p->es[0]));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->prof_id);
+
+ memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile ID TCAM changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
+ struct ice_prof_id_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_PROF_TCAM);
+ p = (struct ice_prof_id_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
+ p->entry[0].prof_id = tmp->prof_id;
+
+ memcpy(p->entry[0].key,
+ &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+ sizeof(hw->blk[blk].prof.t->key));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build XLT1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+ struct ice_xlt1_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_XLT1);
+ p = (struct ice_xlt1_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->ptype);
+ p->value[0] = tmp->ptg;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build XLT2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry) {
+ struct ice_xlt2_section *p;
+ u32 id;
+
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ id = ice_sect_id(blk, ICE_XLT2);
+ p = (struct ice_xlt2_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->vsi);
+ p->value[0] = cpu_to_le16(tmp->vsig);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chgs)
+{
+ struct ice_buf_build *b;
+ struct ice_chs_chg *tmp;
+ enum ice_status status;
+ u16 pkg_sects;
+ u16 xlt1 = 0;
+ u16 xlt2 = 0;
+ u16 tcam = 0;
+ u16 es = 0;
+ u16 sects;
+
+ /* count number of sections we need */
+ list_for_each_entry(tmp, chgs, list_entry) {
+ switch (tmp->type) {
+ case ICE_PTG_ES_ADD:
+ if (tmp->add_ptg)
+ xlt1++;
+ if (tmp->add_prof)
+ es++;
+ break;
+ case ICE_TCAM_ADD:
+ tcam++;
+ break;
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ xlt2++;
+ break;
+ default:
+ break;
+ }
+ }
+ sects = xlt1 + xlt2 + tcam + es;
+
+ if (!sects)
+ return 0;
+
+ /* Build update package buffer */
+ b = ice_pkg_buf_alloc(hw);
+ if (!b)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_pkg_buf_reserve_section(b, sects);
+ if (status)
+ goto error_tmp;
+
+ /* Preserve order of table update: ES, TCAM, PTG, VSIG */
+ if (es) {
+ status = ice_prof_bld_es(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (tcam) {
+ status = ice_prof_bld_tcam(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt1) {
+ status = ice_prof_bld_xlt1(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt2) {
+ status = ice_prof_bld_xlt2(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+	/* After the package buffer build, check that the section count in the
+	 * buffer is non-zero and matches the number of sections detected for
+	 * package update.
+	 */
+ pkg_sects = ice_pkg_buf_get_active_sections(b);
+ if (!pkg_sects || pkg_sects != sects) {
+ status = ICE_ERR_INVAL_SIZE;
+ goto error_tmp;
+ }
+
+ /* update package */
+ status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ if (status == ICE_ERR_AQ_ERROR)
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+ ice_pkg_buf_free(hw, b);
+ return status;
+}
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated
+ * here, it will not be written until the first call to ice_add_flow that
+ * specifies the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es)
+{
+ u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_prof_map *prof;
+ enum ice_status status;
+ u32 byte = 0;
+ u8 prof_id;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ /* search for existing profile */
+ status = ice_find_prof_id(hw, blk, es, &prof_id);
+ if (status) {
+ /* allocate profile ID */
+ status = ice_alloc_prof_id(hw, blk, &prof_id);
+ if (status)
+ goto err_ice_add_prof;
+
+ /* and write new es */
+ ice_write_es(hw, blk, prof_id, es);
+ }
+
+ ice_prof_inc_ref(hw, blk, prof_id);
+
+ /* add profile info */
+ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
+ if (!prof)
+ goto err_ice_add_prof;
+
+ prof->profile_cookie = id;
+ prof->prof_id = prof_id;
+ prof->ptg_cnt = 0;
+ prof->context = 0;
+
+ /* build list of ptgs */
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+ u32 bit;
+
+ if (!ptypes[byte]) {
+ bytes--;
+ byte++;
+ continue;
+ }
+
+ /* Examine 8 bits per byte */
+ for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
+ u8 m;
+
+ ptype = byte * BITS_PER_BYTE + bit;
+
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
+
+ set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
+
+			/* if nothing is left in this byte, exit */
+ m = ~((1 << (bit + 1)) - 1);
+ if (!(ptypes[byte] & m))
+ break;
+ }
+
+ bytes--;
+ byte++;
+ }
+
+ list_add(&prof->list, &hw->blk[blk].es.prof_map);
+ status = 0;
+
+err_ice_add_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
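+
+/* Editor's sketch (not part of this patch): a caller such as the RSS flow
+ * code would register a profile along these lines, where 'es' comes from the
+ * flow layer and 'prof_cookie' is a hypothetical caller-chosen tracking ID:
+ *
+ *	u8 ptypes[DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
+ *
+ *	ptypes[0] = 0x02;	(hypothetical: profile applies to ptype 1)
+ *	status = ice_add_prof(hw, ICE_BLK_RSS, prof_cookie, ptypes, es);
+ */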
+
+/**
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
+ */
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
+
+ list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
+
+ return entry;
+}
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+static struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to count the profiles
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+ struct ice_vsig_prof *p;
+
+ list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a TCAM index
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+ /* Masks to invoke a never match entry */
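+	/* (nm_msk marks key bit 0 as never match while dc_msk leaves only that
+	 * bit as "care", so the rewritten entry can never be hit)
+	 */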
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+ dc_msk, nm_msk);
+ if (status)
+ return status;
+
+ /* release the TCAM entry */
+ status = ice_free_tcam_ent(hw, blk, idx);
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof: pointer to profile structure to remove
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_vsig_prof *prof)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < prof->tcam_count; i++)
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *vsi_cur;
+ struct ice_vsig_prof *d, *t;
+ enum ice_status status;
+
+ /* remove TCAM entries */
+ list_for_each_entry_safe(d, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ status = ice_rem_prof_id(hw, blk, d);
+ if (status)
+ return status;
+
+ list_del(&d->list);
+ devm_kfree(ice_hw_to_dev(hw), d);
+ }
+
+	/* Move all VSIs associated with this VSIG to the default VSIG */
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the list
+ * and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur)
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+ struct ice_chs_chg *p;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->type = ICE_VSIG_REM;
+ p->orig_vsig = vsig;
+ p->vsig = ICE_DEFAULT_VSIG;
+ p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+ list_add(&p->list_entry, chg);
+
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ return ice_vsig_free(hw, blk, vsig);
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *p, *t;
+ enum ice_status status;
+
+ list_for_each_entry_safe(p, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (p->profile_cookie == hdl) {
+ if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+ /* this is the last profile, remove the VSIG */
+ return ice_rem_vsig(hw, blk, vsig, chg);
+
+ status = ice_rem_prof_id(hw, blk, p);
+ if (!status) {
+ list_del(&p->list);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ return status;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_chs_chg *del, *tmp;
+ enum ice_status status;
+ struct list_head chg;
+ u16 i;
+
+ INIT_LIST_HEAD(&chg);
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+ if (ice_has_prof_vsig(hw, blk, i, id)) {
+ status = ice_rem_prof_id_vsig(hw, blk, i, id,
+ &chg);
+ if (status)
+ goto err_ice_rem_flow_all;
+ }
+ }
+
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the ID parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *pmap;
+ enum ice_status status;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
+
+ /* remove all flows with this profile */
+ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+ if (status)
+ goto err_ice_rem_prof;
+
+ /* dereference profile, and possibly remove */
+ ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
+ list_del(&pmap->list);
+ devm_kfree(ice_hw_to_dev(hw), pmap);
+
+err_ice_rem_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
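+
+/* Editor's sketch (not part of this patch): the profile registered in the
+ * ice_add_prof() example above would be torn down with:
+ *
+ *	ice_rem_prof(hw, ICE_BLK_RSS, prof_cookie);
+ */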
+
+/**
+ * ice_get_prof - get profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct list_head *chg)
+{
+ struct ice_prof_map *map;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < map->ptg_cnt; i++)
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_prof;
+
+ p->type = ICE_PTG_ES_ADD;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->add_ptg = 0;
+
+ p->add_prof = 1;
+ p->prof_id = map->prof_id;
+
+ hw->blk[blk].es.written[map->prof_id] = true;
+
+ list_add(&p->list_entry, chg);
+ }
+
+ return 0;
+
+err_ice_get_prof:
+ /* let caller clean up the change list */
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *lst)
+{
+ struct ice_vsig_prof *ent1, *ent2;
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ struct ice_vsig_prof *p;
+
+ /* copy to the input list */
+ p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_profs_vsig;
+
+ list_add_tail(&p->list, lst);
+ }
+
+ return 0;
+
+err_ice_get_profs_vsig:
+ list_for_each_entry_safe(ent1, ent2, lst, list) {
+ list_del(&ent1->list);
+ devm_kfree(ice_hw_to_dev(hw), ent1);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *lst, u64 hdl)
+{
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
+ u16 i;
+
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->profile_cookie = map->profile_cookie;
+ p->prof_id = map->prof_id;
+ p->tcam_count = map->ptg_cnt;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ p->tcam[i].prof_id = map->prof_id;
+ p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+ p->tcam[i].ptg = map->ptg[i];
+ }
+
+ list_add(&p->list, lst);
+
+ return 0;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 orig_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (!status)
+ status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+ }
+
+ p->type = ICE_VSI_MOVE;
+ p->vsi = vsi;
+ p->orig_vsig = orig_vsig;
+ p->vsig = vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable TCAM change
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the VSIG of the TCAM entry
+ * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable TCAM entry to the change log
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+ u16 vsig, struct ice_tcam_inf *tcam,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+
+ /* Default: enable means change the low flag bit to don't care */
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+ /* if disabling, free the TCAM */
+ if (!enable) {
+ status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
+ }
+
+ /* for re-enabling, reallocate a TCAM */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+ tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+ nm_msk);
+ if (status)
+ goto err_ice_prof_tcam_ena_dis;
+
+ tcam->in_use = 1;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = tcam->prof_id;
+ p->ptg = tcam->ptg;
+ p->vsig = 0;
+ p->tcam_idx = tcam->tcam_idx;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_prof_tcam_ena_dis:
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 idx;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ /* Priority is based on the order in which the profiles are added. The
+ * newest added profile has highest priority and the oldest added
+ * profile has the lowest priority. Since the profile property list for
+ * a VSIG is sorted from newest to oldest, this code traverses the list
+ * in order and enables the first of each PTG that it finds (that is not
+ * already enabled); it also disables any duplicate PTGs that it finds
+ * in the older profiles (that are currently enabled).
+ */
+
+ list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ u16 i;
+
+ for (i = 0; i < t->tcam_count; i++) {
+ /* Scan the priorities from newest to oldest.
+ * Make sure that the newest profiles take priority.
+ */
+ if (test_bit(t->tcam[i].ptg, ptgs_used) &&
+ t->tcam[i].in_use) {
+ /* need to mark this PTG as never match, as it
+ * was already in use and therefore duplicate
+ * (and lower priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, false,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
+ !t->tcam[i].in_use) {
+				/* need to enable this PTG, as it is not in use
+ * and not enabled (highest priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, true,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ }
+
+ /* keep track of used ptgs */
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ }
+ }
+
+ return 0;
+}
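+
+/* Editor's illustrative scenario (not part of this patch): if a newer
+ * profile A and an older profile B in the same VSIG both use PTG 5, the
+ * walk above leaves A's TCAM entry for PTG 5 enabled and disables B's, so
+ * the newest profile wins.
+ */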
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ /* Masks that ignore flags */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *t;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Error out if this VSIG already has this profile */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ /* new VSIG profile structure */
+ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ t->profile_cookie = map->profile_cookie;
+ t->prof_id = map->prof_id;
+ t->tcam_count = map->ptg_cnt;
+
+ /* create TCAM entries */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ enum ice_status status;
+ u16 tcam_idx;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto err_ice_add_prof_id_vsig;
+
+ /* allocate the TCAM entry index */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ t->tcam[i].ptg = map->ptg[i];
+ t->tcam[i].prof_id = map->prof_id;
+ t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].in_use = true;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = t->tcam[i].prof_id;
+ p->ptg = t->tcam[i].ptg;
+ p->vsig = vsig;
+ p->tcam_idx = t->tcam[i].tcam_idx;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+ t->tcam[i].prof_id,
+ t->tcam[i].ptg, vsig, 0, 0,
+ vl_msk, dc_msk, nm_msk);
+ if (status)
+ goto err_ice_add_prof_id_vsig;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+ }
+
+ /* add profile to VSIG */
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+
+ return 0;
+
+err_ice_add_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), t);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 new_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ new_vsig = ice_vsig_alloc(hw, blk);
+ if (!new_vsig) {
+ status = ICE_ERR_HW_TABLE;
+ goto err_ice_create_prof_id_vsig;
+ }
+
+ status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ p->type = ICE_VSIG_ADD;
+ p->vsi = vsi;
+ p->orig_vsig = ICE_DEFAULT_VSIG;
+ p->vsig = new_vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_create_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profiles that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ struct list_head *lst, struct list_head *chg)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = ice_vsig_alloc(hw, blk);
+ if (!vsig)
+ return ICE_ERR_HW_TABLE;
+
+ status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+ if (status)
+ return status;
+
+ list_for_each_entry(t, lst, list) {
+ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+ chg);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ struct list_head lst;
+
+ INIT_LIST_HEAD(&lst);
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return false;
+
+ t->profile_cookie = hdl;
+ list_add(&t->list, &lst);
+
+ status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+ list_del(&t->list);
+ kfree(t);
+
+ return !status;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to enable with the profile specified by ID
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the ID parameter for the VSIs specified in the VSI
+ * array. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head union_lst;
+ enum ice_status status;
+ struct list_head chg;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&union_lst);
+ INIT_LIST_HEAD(&chg);
+
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
+ if (status)
+ return status;
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool only_vsi;
+ u16 or_vsig;
+ u16 ref;
+
+ /* found in VSIG */
+ or_vsig = vsig;
+
+ /* make sure that there is no overlap/conflict between the new
+ * characteristics and the existing ones; we don't support that
+ * scenario
+ */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto err_ice_add_prof_id_flow;
+ }
+
+ /* last VSI in the VSIG? */
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ /* create a union of the current profiles and the one being
+ * added
+ */
+ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* search for an existing VSIG with an exact characteristic match */
+ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+ if (!status) {
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* VSI has been moved out of or_vsig. If the or_vsig had
+ * only that VSI, it is now empty and can be removed.
+ */
+ if (only_vsi) {
+ status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else if (only_vsi) {
+ /* If the original VSIG only contains one VSI, then it
+ * will be the requesting VSI. In this case the VSI is
+ * not sharing entries and we can simply add the new
+ * profile to the VSIG.
+ */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* No match, so we need a new VSIG */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &union_lst, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else {
+ /* need to find or add a VSIG; search for an existing VSIG
+ * with an exact characteristic match
+ */
+ if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+ /* found an exact match; move the VSI to the matching VSIG */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* no exact match was found, so add a new VSIG */
+ status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ }
+
+ /* update hardware */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
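+
+/* Usage sketch (a minimal illustration, mirroring ice_flow_assoc_prof()
+ * in ice_flow.c; 'hdl' is the cookie of a previously added profile and
+ * 'vsi' a valid HW VSI number):
+ *
+ *	enum ice_status status;
+ *
+ *	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
+ *	if (status)
+ *		return status;	// VSI keeps its previous VSIG on failure
+ *
+ * Depending on current VSIG membership, the call moves the VSI to an
+ * existing VSIG with matching characteristics, extends the current VSIG
+ * (sole-member case), or creates a new VSIG from the profile union.
+ */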
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the HW struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
+{
+ struct ice_vsig_prof *ent, *tmp;
+
+ list_for_each_entry_safe(ent, tmp, lst, list)
+ if (ent->profile_cookie == hdl) {
+ list_del(&ent->list);
+ devm_kfree(ice_hw_to_dev(hw), ent);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI from which to remove the profile specified by ID
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the ID parameter for the VSIs specified in the VSI
+ * array. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head chg, copy;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&copy);
+ INIT_LIST_HEAD(&chg);
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool last_profile;
+ bool only_vsi;
+ u16 ref;
+
+ /* found in VSIG */
+ last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ if (only_vsi) {
+ /* If the original VSIG only contains one reference,
+ * which will be the requesting VSI, then the VSI is not
+ * sharing entries and we can simply remove the specific
+ * characteristics from the VSIG.
+ */
+
+ if (last_profile) {
+ /* If there are no profiles left for this VSIG,
+ * then simply remove the VSIG.
+ */
+ status = ice_rem_vsig(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ status = ice_rem_prof_id_vsig(hw, blk, vsig,
+ hdl, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+
+ } else {
+ /* Make a copy of the VSIG's list of profiles */
+ status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Remove specified profile entry from the list */
+ status = ice_rem_prof_from_list(hw, &copy, hdl);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ if (list_empty(&copy)) {
+ status = ice_move_vsi(hw, blk, vsi,
+ ICE_DEFAULT_VSIG, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+ &vsig)) {
+ /* Found a VSIG with a matching profile list; move
+ * the VSI to that VSIG
+ */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ /* since no existing VSIG supports this
+ * characteristic pattern, we need to create a
+ * new VSIG and TCAM entries
+ */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &copy, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+ }
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ /* update hardware tables */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &copy, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
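+
+/* Usage sketch (illustrative only): removal mirrors the add path above.
+ * A caller tearing down a flow might do, assuming no other VSI still
+ * references the profile:
+ *
+ *	status = ice_rem_prof_id_flow(hw, blk, vsi, hdl);
+ *	if (!status)
+ *		status = ice_rem_prof(hw, blk, hdl);
+ *
+ * This is how ice_flow_disassoc_prof() and ice_flow_rem_prof_sync() in
+ * ice_flow.c pair the two calls.
+ */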
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 37eb282742d1..c7b5e1a6ea2b 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,13 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
@@ -26,4 +33,6 @@ void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 5d5a7eaffa30..0fb3fe3ff3ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,6 +3,9 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
@@ -105,37 +108,57 @@ struct ice_buf_hdr {
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
@@ -152,6 +175,19 @@ enum ice_block {
ICE_BLK_COUNT
};
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
/* package labels */
struct ice_label {
__le16 value;
@@ -234,6 +270,13 @@ struct ice_prof_redir_section {
u8 redir_value[1];
};
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
@@ -248,6 +291,12 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+struct ice_pkg_es {
+ __le16 count;
+ __le16 offset;
+ struct ice_fv_word es[1];
+};
+
struct ice_es {
u32 sid;
u16 count;
@@ -280,6 +329,35 @@ struct ice_ptg_ptype {
u8 ptg;
};
+#define ICE_MAX_TCAM_PER_PROFILE 32
+#define ICE_MAX_PTG_PER_PROFILE 32
+
+struct ice_prof_map {
+ struct list_head list;
+ u64 profile_cookie;
+ u64 context;
+ u8 prof_id;
+ u8 ptg_cnt;
+ u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM 0xFFFF
+
+struct ice_tcam_inf {
+ u16 tcam_idx;
+ u8 ptg;
+ u8 prof_id;
+ u8 in_use;
+};
+
+struct ice_vsig_prof {
+ struct list_head list;
+ u64 profile_cookie;
+ u8 prof_id;
+ u8 tcam_count;
+ struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
@@ -329,6 +407,13 @@ struct ice_xlt2 {
u16 count;
};
+/* Profile ID Management */
+struct ice_prof_id_key {
+ __le16 flags;
+ u8 xlt1;
+ __le16 xlt2_cdid;
+} __packed;
+
/* Keys are made up of two values, each one-half the size of the key.
* For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values)
*/
@@ -371,4 +456,31 @@ struct ice_blk_info {
u8 is_list_init;
};
+enum ice_chg_type {
+ ICE_TCAM_NONE = 0,
+ ICE_PTG_ES_ADD,
+ ICE_TCAM_ADD,
+ ICE_VSIG_ADD,
+ ICE_VSIG_REM,
+ ICE_VSI_MOVE,
+};
+
+struct ice_chs_chg {
+ struct list_head list_entry;
+ enum ice_chg_type type;
+
+ u8 add_ptg;
+ u8 add_vsig;
+ u8 add_tcam_idx;
+ u8 add_prof;
+ u16 ptype;
+ u8 ptg;
+ u8 prof_id;
+ u16 vsi;
+ u16 vsig;
+ u16 orig_vsig;
+ u16 tcam_idx;
+};
+
+#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
new file mode 100644
index 000000000000..a05ceb59863b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -0,0 +1,1275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+ enum ice_flow_seg_hdr hdr;
+ s16 off; /* Offset from start of a protocol header, in bits */
+ u16 size; /* Size of the field in bits */
+};
+
+#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+}
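+
+/* For example (editorial note), the TCP source port entry below,
+ * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), expands to
+ * { .hdr = ICE_FLOW_SEG_HDR_TCP, .off = 0, .size = 16 }: byte-based
+ * offsets and sizes are converted to bit units at compile time.
+ */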
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV4_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ /* Transport */
+ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single IPv4 header
+ */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+ 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
+ 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+ 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
+ 0x00000770, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* UDP Packet types for non-tunneled packets or tunneled
+ * packets with inner UDP.
+ */
+static const u32 ice_ptypes_udp_il[] = {
+ 0x81000000, 0x20204040, 0x04000010, 0x80810102,
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+ 0x04000000, 0x80810102, 0x10000040, 0x02040408,
+ 0x00000102, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+ 0x08000000, 0x01020204, 0x20000081, 0x04080810,
+ 0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
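+
+/* Editorial note: each table above is a bitmap over packet type (ptype)
+ * indices, consumed as unsigned longs by bitmap_and() below. Bit N lives
+ * in 32-bit word N / 32 at position N % 32, so a hypothetical check for
+ * a single ptype 'pt' could read:
+ *
+ *	bool has_pt = ice_ptypes_ipv4_ofos[pt / 32] & BIT(pt % 32);
+ */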
+
+/* Parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+ enum ice_block blk;
+ u16 entry_length; /* # of bytes formatted entry will require */
+ u8 es_cnt;
+ struct ice_flow_prof *prof;
+
+ /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
+ * This will give us the direction flags.
+ */
+ struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+#define ICE_FLOW_SEG_HDRS_L3_MASK \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+ u8 i;
+
+ for (i = 0; i < segs_cnt; i++) {
+ /* Multiple L3 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+ return ICE_ERR_PARAM;
+
+ /* Multiple L4 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+ return ICE_ERR_PARAM;
+ }
+
+ return 0;
+}
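+
+/* Example (illustrative): a segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 |
+ * ICE_FLOW_SEG_HDR_TCP passes, since each masked value isolates one bit.
+ * A segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6
+ * fails: the masked L3 value has two bits set, so is_power_of_2()
+ * rejects it with ICE_ERR_PARAM.
+ */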
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers being present in packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof;
+ u8 i;
+
+ memset(params->ptypes, 0xff, sizeof(params->ptypes));
+
+ prof = params->prof;
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ const unsigned long *src;
+ u32 hdrs;
+
+ hdrs = prof->segs[i].hdrs;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
+ (const unsigned long *)ice_ptypes_ipv4_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
+ (const unsigned long *)ice_ptypes_ipv6_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ src = (const unsigned long *)ice_ptypes_udp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+ bitmap_and(params->ptypes, params->ptypes,
+ (const unsigned long *)ice_ptypes_tcp_il,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+ src = (const unsigned long *)ice_ptypes_sctp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+ }
+
+ return 0;
+}
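+
+/* Example (illustrative): for a single segment with IPv4 and TCP
+ * headers, params->ptypes starts as all ones and is ANDed first with
+ * ice_ptypes_ipv4_ofos (segment 0 is the outer header) and then with
+ * ice_ptypes_tcp_il, leaving only the ptypes that carry both headers.
+ */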
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg, enum ice_flow_field fld)
+{
+ enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+ u8 fv_words = hw->blk[params->blk].es.fvw;
+ struct ice_flow_fld_info *flds;
+ u16 cnt, ese_bits, i;
+ u16 off;
+
+ flds = params->prof->segs[seg].fields;
+
+ switch (fld) {
+ case ICE_FLOW_FIELD_IDX_IPV4_SA:
+ case ICE_FLOW_FIELD_IDX_IPV4_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ prot_id = ICE_PROT_TCP_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+ prot_id = ICE_PROT_SCTP_IL;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ /* Each extraction sequence entry is one word in size, and extracts
+ * data from a word-aligned offset within a protocol header.
+ */
+ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
+
+ flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
+ flds[fld].xtrct.idx = params->es_cnt;
+
+ /* Determine the number of word-sized extraction sequence
+ * entries this field consumes
+ */
+ cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
+ ese_bits);
+
+ /* Fill in the extraction sequence entries needed for this field */
+ off = flds[fld].xtrct.off;
+ for (i = 0; i < cnt; i++) {
+ u8 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * required does not exceed the block's capability
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+
+ return 0;
+}
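+
+/* Worked example (editorial sketch, assuming ICE_FLOW_FV_EXTRACT_SZ is
+ * 2 bytes, i.e. ese_bits = 16): for ICE_FLOW_FIELD_IDX_IPV6_SA, off is
+ * 8 * 8 = 64 bits and size is 128 bits, so:
+ *
+ *	xtrct.off  = (64 / 16) * 2 = 8 bytes
+ *	xtrct.disp = 64 % 16 = 0 bits
+ *	cnt        = DIV_ROUND_UP(0 + 128, 16) = 8 entries
+ *
+ * i.e. the 16-byte IPv6 source address consumes eight word-sized
+ * extraction sequence entries.
+ */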
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+ struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof = params->prof;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < prof->segs_cnt; i++) {
+ u8 j;
+
+ for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i,
+ (enum ice_flow_field)j);
+ if (status)
+ return status;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+ enum ice_status status;
+
+ status = ice_flow_proc_seg_hdrs(params);
+ if (status)
+ return status;
+
+ status = ice_flow_create_xtrct_seq(hw, params);
+ if (status)
+ return status;
+
+ switch (params->blk) {
+ case ICE_BLK_RSS:
+ /* Only header information is provided for RSS configuration.
+ * No further processing is needed.
+ */
+ status = 0;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
+#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+ u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+ struct ice_flow_prof *p, *prof = NULL;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
+ segs_cnt && segs_cnt == p->segs_cnt) {
+ u8 i;
+
+ /* Check for profile-VSI association if specified */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+ ice_is_vsi_valid(hw, vsi_handle) &&
+ !test_bit(vsi_handle, p->vsis))
+ continue;
+
+ /* Protocol headers must be checked. Matched fields are
+ * checked if specified.
+ */
+ for (i = 0; i < segs_cnt; i++)
+ if (segs[i].hdrs != p->segs[i].hdrs ||
+ ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+ segs[i].match != p->segs[i].match))
+ break;
+
+ /* A match is found if all segments are matched */
+ if (i == segs_cnt) {
+ prof = p;
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return prof;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *p;
+
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if (p->id == prof_id)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, u64 prof_id,
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ struct ice_flow_prof_params params;
+ enum ice_status status;
+ u8 i;
+
+ if (!prof)
+ return ICE_ERR_BAD_PTR;
+
+ memset(&params, 0, sizeof(params));
+ params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
+ GFP_KERNEL);
+ if (!params.prof)
+ return ICE_ERR_NO_MEMORY;
+
+ /* initialize extraction sequence to all invalid (0xff) */
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params.es[i].prot_id = ICE_PROT_INVALID;
+ params.es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ params.blk = blk;
+ params.prof->id = prof_id;
+ params.prof->dir = dir;
+ params.prof->segs_cnt = segs_cnt;
+
+ /* Make a copy of the segments that need to be persistent in the flow
+ * profile instance
+ */
+ for (i = 0; i < segs_cnt; i++)
+ memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+
+ status = ice_flow_proc_segs(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW,
+ "Error processing a flow's packet segments\n");
+ goto out;
+ }
+
+ /* Add a HW profile for this flow profile */
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&params.prof->entries);
+ mutex_init(&params.prof->entries_lock);
+ *prof = params.prof;
+
+out:
+ if (status)
+ devm_kfree(ice_hw_to_dev(hw), params.prof);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof)
+{
+ enum ice_status status;
+
+ /* Remove all hardware profiles associated with this flow profile */
+ status = ice_rem_prof(hw, blk, prof->id);
+ if (!status) {
+ list_del(&prof->l_entry);
+ mutex_destroy(&prof->entries_lock);
+ devm_kfree(ice_hw_to_dev(hw), prof);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (!test_bit(vsi_handle, prof->vsis)) {
+ status = ice_add_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ set_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile add failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (test_bit(vsi_handle, prof->vsis)) {
+ status = ice_rem_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ clear_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile remove failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ */
+static enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ enum ice_status status;
+
+ if (segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_MAX_LIMIT;
+
+ if (!segs_cnt)
+ return ICE_ERR_PARAM;
+
+ if (!segs)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_flow_val_hdrs(segs, segs_cnt);
+ if (status)
+ return status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+ prof);
+ if (!status)
+ list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+static enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ *
+ * This helper function stores information of a field being matched, including
+ * the type of the field and the locations of the value to match, the mask,
+ * and the upper-bound value at the start of the input buffer for a flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ enum ice_flow_fld_match_type type, u16 val_loc,
+ u16 mask_loc, u16 last_loc)
+{
+ u64 bit = BIT_ULL(fld);
+
+ seg->match |= bit;
+ if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ seg->range |= bit;
+
+ seg->fields[fld].type = type;
+ seg->fields[fld].src.val = val_loc;
+ seg->fields[fld].src.mask = mask_loc;
+ seg->fields[fld].src.last = last_loc;
+
+ ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+static void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+ enum ice_flow_fld_match_type t = range ?
+ ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+ ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
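+
+/* Usage sketch (illustrative; the byte offset is a caller-defined
+ * example, not a fixed layout): to match on the TCP destination port
+ * stored at byte offset 4 of an entry's input buffer, with no mask or
+ * upper range value:
+ *
+ *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 4,
+ *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ *			 false);
+ *
+ * Besides recording the offsets, this ORs ICE_FLOW_SEG_HDR_TCP into
+ * seg->hdrs via ice_flds_info[].
+ */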
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from the hash bitmap and use the flow
+ * header value to set the flow field segment for further use in flow
+ * profile entry addition or removal.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+ u32 flow_hdr)
+{
+ u64 val;
+ u8 i;
+
+ for_each_set_bit(i, (unsigned long *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ return ICE_ERR_PARAM;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
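+
+/* Example (illustrative): hash_fields = ICE_FLOW_HASH_IPV4 |
+ * ICE_FLOW_HASH_TCP_PORT sets the IPv4 SA/DA and TCP port fields, which
+ * mark ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_TCP in segs->hdrs, and
+ * the single-bit L3/L4 checks pass. Requesting both IPv4 and IPv6 fields
+ * in one segment would instead fail with ICE_ERR_CFG.
+ */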
+
+/**
+ * ice_rem_vsi_rss_list - remove VSI from RSS list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * Remove the VSI from all RSS configurations in the list.
+ */
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ if (list_empty(&hw->rss_list_head))
+ return;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (test_and_clear_bit(vsi_handle, r->vsis))
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_unlock(&hw->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from each one. If a flow profile is then left with no VSIs, it
+ * will be removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *p, *t;
+ enum ice_status status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (list_empty(&hw->fl_profs[blk]))
+ return 0;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
+ if (test_bit(vsi_handle, p->vsis)) {
+ status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+ if (status)
+ break;
+
+ if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof_sync(hw, blk, p);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_rem_rss_list - remove RSS configuration from list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ /* Search for RSS hash fields associated with the VSI that match the
+ * hash configurations associated with the flow profile. If found,
+ * remove the entry from the VSI context's RSS list and delete it.
+ */
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ clear_bit(vsi_handle, r->vsis);
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ return;
+ }
+}
+
+/**
+ * ice_add_rss_list - add RSS configuration to list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *rss_cfg;
+
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ set_bit(vsi_handle, r->vsis);
+ return 0;
+ }
+
+ rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
+ GFP_KERNEL);
+ if (!rss_cfg)
+ return ICE_ERR_NO_MEMORY;
+
+ rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ set_bit(vsi_handle, rss_cfg->vsis);
+
+ list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
+
+ return 0;
+}
+
+#define ICE_FLOW_PROF_HASH_S 0
+#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S 32
+#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S 63
+#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
+#define ICE_RSS_OUTER_HEADERS 1
+
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:62] - Protocol header
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
+ (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
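+
+/* Worked example (editorial note, using the enum values from ice_flow.h):
+ * a non-tunneled TCP/IPv4 RSS profile has hash = ICE_FLOW_HASH_IPV4 |
+ * ICE_FLOW_HASH_TCP_PORT = 0x33, hdr = ICE_FLOW_SEG_HDR_IPV4 |
+ * ICE_FLOW_SEG_HDR_TCP = 0x44 and segs_cnt = 1, so the macro yields
+ * 0x0000004400000033ULL with the encapsulation bit clear.
+ */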
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_flow_seg_info *segs;
+ enum ice_status status;
+
+ if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_PARAM;
+
+ segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto exit;
+
+ /* Search for a flow profile that has matching headers, hash fields
+ * and has the input VSI associated to it. If found, no further
+ * operations required and exit.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof)
+ goto exit;
+
+ /* Check if a flow profile exists with the same protocol headers and
+ * associated with the input VSI. If so disassociate the VSI from
+ * this profile. The VSI will be added to a new profile created with
+ * the protocol header and new hash field configuration.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof) {
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ ice_rem_rss_list(hw, vsi_handle, prof);
+ else
+ goto exit;
+
+ /* Remove profile if it has no VSIs associated */
+ if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+ if (status)
+ goto exit;
+ }
+ }
+
+ /* Search for a profile that has same match fields only. If this
+ * exists then associate the VSI to this profile.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (prof) {
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+ goto exit;
+ }
+
+ /* Create a new flow profile with generated profile and packet
+ * segment information.
+ */
+ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+ ICE_FLOW_GEN_PROFID(hashed_flds,
+ segs[segs_cnt - 1].hdrs,
+ segs_cnt),
+ segs, segs_cnt, &prof);
+ if (status)
+ goto exit;
+
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ /* If association to a new flow profile failed then this profile can
+ * be removed.
+ */
+ if (status) {
+ ice_flow_rem_prof(hw, blk, prof->id);
+ goto exit;
+ }
+
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+
+exit:
+ kfree(segs);
+ return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function generates a flow profile based on the input fields to hash
+ * on and the flow type, and uses the VSI number to add a flow entry to the
+ * profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
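+
+/* Usage sketch (a minimal example, assuming 'vsi_handle' is a validated
+ * software VSI handle): enable TCP-over-IPv4 RSS hashing on a VSI:
+ *
+ *	enum ice_status status;
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_NONE);
+ *
+ * ICE_HASH_TCP_IPV4 already implies the IPv4 and TCP headers through
+ * ice_flds_info[], so no additional header bits are needed here.
+ */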
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * Since ice_flow_avf_hdr_field values represent individual bit shifts in
+ * a hash, convert them to the appropriate flow L3/L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+ enum ice_status status = 0;
+ u64 hash_flds;
+
+ if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Make sure no unsupported bits are specified */
+ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+ ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+ return ICE_ERR_CFG;
+
+ hash_flds = avf_hash;
+
+ /* Always create an L3 RSS configuration for any L4 RSS configuration */
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+ /* Create the corresponding RSS configuration for each valid hash bit */
+ while (hash_flds) {
+ u64 rss_hash = ICE_HASH_INVALID;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ }
+ } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ }
+ }
+
+ if (rss_hash == ICE_HASH_INVALID)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+ ICE_FLOW_SEG_HDR_NONE);
+ if (status)
+ break;
+ }
+
+ return status;
+}
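+
+/* Worked example (illustrative): an AVF request of
+ * BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) first has the plain IPv4 bits
+ * ORed in (L3 always accompanies an L4 hash). The loop then consumes the
+ * IPv4 bits as an ICE_FLOW_HASH_IPV4 configuration and, on the next
+ * iteration, the TCP bit as ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
+ * producing two RSS configurations in total.
+ */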
+
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+ struct ice_rss_cfg *r;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis)) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_OUTER_HEADERS);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function returns the match fields of the first flow profile
+ * instance that has the given header types and contains the input VSI
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+ struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+ /* verify that the protocol header is non-zero and the VSI is valid */
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_HASH_INVALID;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (test_bit(vsi_handle, r->vsis) &&
+ r->packet_hdr == hdrs) {
+ rss_cfg = r;
+ break;
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
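ice_get_rss_cfg() hands back a bitmap of ICE_FLOW_FIELD_IDX_* bits rather than anything ethtool-shaped, so a caller has to translate. A hypothetical sketch of that translation for the TCP-over-IPv4 case, assuming the standard ethtool RXH_* flags; this is illustrative only, not the driver's actual ethtool path:

    static u64 sketch_rss_fields_to_rxh(struct ice_hw *hw, u16 vsi_handle)
    {
            u64 hash = ice_get_rss_cfg(hw, vsi_handle,
                                       ICE_FLOW_SEG_HDR_IPV4 |
                                       ICE_FLOW_SEG_HDR_TCP);
            u64 rxh = 0;

            if (hash == ICE_HASH_INVALID)
                    return 0;
            if (hash & ICE_FLOW_HASH_IPV4)     /* src/dst IP hashed */
                    rxh |= RXH_IP_SRC | RXH_IP_DST;
            if (hash & ICE_FLOW_HASH_TCP_PORT) /* src/dst port hashed */
                    rxh |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
            return rxh;
    }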
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
new file mode 100644
index 000000000000..5558627bd5eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_FLOW_H_
+#define _ICE_FLOW_H_
+
+#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
+#define ICE_FLOW_FLD_OFF_INVAL 0xffff
+
+/* Generate flow hash field from flow field type(s) */
+#define ICE_FLOW_HASH_IPV4 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID 0
+#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+
+/* Protocol header fields within a packet segment. A segment consists of one
+ * or more protocol headers that make up a logical group. Each logical group
+ * may encapsulate, or be encapsulated by, tunneling or encapsulation
+ * protocols used for network virtualization, such as GRE or VXLAN.
+ */
+enum ice_flow_seg_hdr {
+ ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
+ ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_TCP = 0x00000040,
+ ICE_FLOW_SEG_HDR_UDP = 0x00000080,
+ ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+};
+
+enum ice_flow_field {
+ /* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* The total number of enum values must not exceed 64 */
+ ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+ /* Values 0 - 28 are reserved for future use */
+ ICE_AVF_FLOW_FIELD_INVALID = 0,
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP,
+ ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP,
+ ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+ ICE_AVF_FLOW_FIELD_RSVD47,
+ ICE_AVF_FLOW_FIELD_FCOE_OX,
+ ICE_AVF_FLOW_FIELD_FCOE_RX,
+ ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
+ ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is defined to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op, in which the PF driver reports the RSS
+ * hardware capabilities to the caller.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
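ice_add_avf_rss_cfg() above rejects requests with bits outside the supported masks; the same subset test is all that is needed to validate a VF request against this advertised capability set. A trivial standalone sketch of the check, with advertised = ICE_DEFAULT_RSS_HENA mirroring the ICE_ERR_CFG path:

    #include <stdbool.h>
    #include <stdint.h>

    /* reject any requested hena bit the PF did not advertise */
    static bool hena_request_ok(uint64_t requested, uint64_t advertised)
    {
            return (requested & ~advertised) == 0;
    }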
+enum ice_flow_dir {
+ ICE_FLOW_RX = 0x02,
+};
+
+enum ice_flow_priority {
+ ICE_FLOW_PRIO_LOW,
+ ICE_FLOW_PRIO_NORMAL,
+ ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_FV_EXTRACT_SZ 2
+
+#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+ u8 prot_id; /* Protocol ID of extracted header field */
+ u16 off; /* Starting offset of the field in header in bytes */
+ u8 idx; /* Index of FV entry used */
+ u8 disp; /* Displacement of field in bits from FV entry's start */
+};
+
+enum ice_flow_fld_match_type {
+ ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
+ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
+ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
+ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+ /* Describe offsets of field information relative to the beginning of
+ * input buffer provided when adding flow entries.
+ */
+ u16 val; /* Offset where the value is located */
+ u16 mask; /* Offset where the mask/prefix value is located */
+ u16 last; /* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+ enum ice_flow_fld_match_type type;
+ /* Location where to retrieve data from an input buffer */
+ struct ice_flow_fld_loc src;
+ /* Location where to put the data into the final entry buffer */
+ struct ice_flow_fld_loc entry;
+ struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_info {
+ u32 hdrs; /* Bitmask indicating protocol headers present */
+ u64 match; /* Bitmask indicating header fields to be matched */
+ u64 range; /* Bitmask indicating header fields matched as ranges */
+
+ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+};
+
+struct ice_flow_prof {
+ struct list_head l_entry;
+
+ u64 id;
+ enum ice_flow_dir dir;
+ u8 segs_cnt;
+
+ /* Keep track of flow entries associated with this flow profile */
+ struct mutex entries_lock;
+ struct list_head entries;
+
+ struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+ /* software VSI handles referenced by this flow profile */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+};
+
+struct ice_rss_cfg {
+ struct list_head l_entry;
+ /* bitmap of VSIs added to the RSS entry */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+ u64 hashed_flds;
+ u32 packet_hdr;
+};
+
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+#endif /* _ICE_FLOW_H_ */
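The 64-entry ceiling noted in ice_flow_field exists because match bitmaps (the match/range members of struct ice_flow_seg_info, and the hashed_flds values throughout) are u64s indexed with BIT_ULL(). A hypothetical compile-time guard that would make the invariant explicit (static_assert via the kernel's build_bug.h, or C11's assert.h in user space); it is not part of the patch:

    static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64,
                  "flow field indices must fit in a 64-bit match bitmap");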
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index e8f32350fed2..f2cababf2561 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -60,15 +60,6 @@
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
-#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 0997d352709b..878e125d8b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -199,6 +199,14 @@ enum ice_rxdid {
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
+/* Receive Descriptor MDID values that access packet flags */
+enum ice_flex_mdid_pkt_flags {
+ ICE_RX_MDID_PKT_FLAGS_15_0 = 20,
+ ICE_RX_MDID_PKT_FLAGS_31_16,
+ ICE_RX_MDID_PKT_FLAGS_47_32,
+ ICE_RX_MDID_PKT_FLAGS_63_48,
+};
+
/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
ICE_RX_MDID_FLOW_ID_LOWER = 5,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index e7449248fab4..1874c9f51a32 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_base.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -493,7 +494,28 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
+ * @vsi: the VSI being cleaned up
+ *
+ * This function deletes the RSS input set for all flows that were
+ * configured for this VSI
+ */
+static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+
+ if (ice_is_safe_mode(pf))
+ return;
+
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status)
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures and configuration
* @vsi: the VSI being removed
*/
static void ice_rss_clean(struct ice_vsi *vsi)
@@ -507,6 +529,11 @@ static void ice_rss_clean(struct ice_vsi *vsi)
devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
devm_kfree(dev, vsi->rss_lut_user);
+
+ ice_vsi_clean_rss_flow_fld(vsi);
+ /* remove RSS replay list */
+ if (!ice_is_safe_mode(pf))
+ ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}
/**
@@ -817,12 +844,23 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- /* Enable MAC Antispoof with new VSI being initialized or updated */
- if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
+ * respectively
+ */
+ if (vsi->type == ICE_VSI_VF) {
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
}
/* Allow control frames out of main VSI */
@@ -1076,6 +1114,115 @@ ice_vsi_cfg_rss_exit:
}
/**
+ * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function will only be called during the VF VSI setup. Upon successful
+ * completion of package download, this function will configure the default
+ * RSS input sets for the VF VSI.
+ */
+static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ if (status)
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function is only called after a successful package download during PF
+ * initialization. Since the downloaded package erases the RSS section, this
+ * function configures RSS input sets for the different flow types. The last
+ * profile added has the highest priority, therefore 2-tuple profiles
+ * (i.e. IPv4 src/dst) are added before 4-tuple profiles (i.e. IPv4 src/dst +
+ * TCP src/dst port).
+ */
+static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
+{
+ u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi_num);
+ return;
+ }
+ /* configure RSS for IPv4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+}
+
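The "last profile added has the highest priority" rule means lookup effectively prefers the most specific profile when a packet matches several. A toy standalone model of that ordering, scanning newest-first; the profile table and the match test are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_IPV4 0x4
    #define HDR_TCP  0x40

    struct toy_profile { const char *name; uint32_t hdrs; };

    int main(void)
    {
            struct toy_profile profs[] = {     /* in insertion order */
                    { "ipv4 2-tuple", HDR_IPV4 },
                    { "tcp4 4-tuple", HDR_IPV4 | HDR_TCP },
            };
            uint32_t pkt = HDR_IPV4 | HDR_TCP; /* a TCP/IPv4 packet */

            for (int i = 1; i >= 0; i--)       /* newest first */
                    if ((pkt & profs[i].hdrs) == profs[i].hdrs) {
                            printf("hash with %s\n", profs[i].name);
                            break;
                    }
            return 0;
    }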
+/**
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
@@ -1636,22 +1783,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
ctxt->info = vsi->info;
- if (ena) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ if (ena)
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
if (!vlan_promisc)
ctxt->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -1661,7 +1800,6 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
goto err_out;
}
- vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
kfree(ctxt);
@@ -1899,8 +2037,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -1924,8 +2064,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_vf_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_LB:
ret = ice_vsi_alloc_rings(vsi);
@@ -2402,6 +2544,97 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
+ * @q_vector: pointer to q_vector which is being updated
+ * @coalesce: pointer to array of struct with stored coalesce
+ *
+ * Set coalesce params in the q_vector and write them to the HW registers.
+ */
+static void
+ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
+ struct ice_coalesce_stored *coalesce)
+{
+ struct ice_ring_container *rx_rc = &q_vector->rx;
+ struct ice_ring_container *tx_rc = &q_vector->tx;
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ tx_rc->itr_setting = coalesce->itr_tx;
+ rx_rc->itr_setting = coalesce->itr_rx;
+
+ /* dynamic ITR values will be updated during Tx/Rx */
+ if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(tx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+ if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+
+ q_vector->intrl = coalesce->intrl;
+ wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+}
+
+/**
+ * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: array of struct with stored coalesce
+ *
+ * Returns the number of q_vectors whose coalesce settings were saved.
+ */
+static int
+ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce)
+{
+ int i;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ coalesce[i].itr_tx = q_vector->tx.itr_setting;
+ coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].intrl = q_vector->intrl;
+ }
+
+ return vsi->num_q_vectors;
+}
+
+/**
+ * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: pointer to array of struct with stored coalesce
+ * @size: size of coalesce array
+ *
+ * ice_vsi_rebuild_get_coalesce should be called beforehand to save the ITR
+ * params in the array. Any q_vector beyond the saved size (or all of them,
+ * if nothing was saved) gets the default coalesce values.
+ */
+static void
+ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce, int size)
+{
+ int i;
+
+ if ((size && !coalesce) || !vsi)
+ return;
+
+ for (i = 0; i < size && i < vsi->num_q_vectors; i++)
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce[i]);
+
+ for (; i < vsi->num_q_vectors; i++) {
+ struct ice_coalesce_stored coalesce_dflt = {
+ .itr_tx = ICE_DFLT_TX_ITR,
+ .itr_rx = ICE_DFLT_RX_ITR,
+ .intrl = 0
+ };
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce_dflt);
+ }
+}
+
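Taken together, the three helpers implement a save/rebuild/restore pattern; the ice_vsi_rebuild() hunk further down wires them up roughly as follows (error handling elided):

    coalesce = kcalloc(vsi->num_q_vectors, sizeof(*coalesce), GFP_KERNEL);
    if (coalesce)
            prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

    /* ... tear down and re-create rings and q_vectors ... */

    ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
    kfree(coalesce);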
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
* @init_vsi: is this an initialization or a reconfigure of the VSI
@@ -2411,6 +2644,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_coalesce_stored *coalesce;
+ int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_status status;
struct ice_pf *pf;
@@ -2423,6 +2658,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (coalesce)
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
+ coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
@@ -2535,6 +2775,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
+ ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
+ kfree(coalesce);
+
return 0;
err_vectors:
@@ -2549,6 +2792,7 @@ err_rings:
err_vsi:
ice_vsi_clear(vsi);
set_bit(__ICE_RESET_FAILED, pf->state);
+ kfree(coalesce);
return ret;
}
@@ -2740,3 +2984,121 @@ cfg_mac_fltr_exit:
ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
return status;
}
+
+/**
+ * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
+ * @sw: switch to check if its default forwarding VSI is free
+ *
+ * Returns true if the default forwarding VSI is already being used, else
+ * false, signaling that it's available for use.
+ */
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+{
+ return (sw->dflt_vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
+ * @sw: switch for the default forwarding VSI to compare against
+ * @vsi: VSI to compare against default forwarding VSI
+ *
+ * Returns true if the VSI passed in is the default forwarding VSI, else
+ * returns false
+ */
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_set_dflt_vsi - set the default forwarding VSI
+ * @sw: switch used to assign the default forwarding VSI
+ * @vsi: VSI getting set as the default forwarding VSI on the switch
+ *
+ * If the VSI passed in is already the default VSI and it's enabled just return
+ * success.
+ *
+ * If there is already a default VSI on the switch and it's enabled then return
+ * -EEXIST since there can only be one default VSI per switch.
+ *
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
+ */
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw || !vsi)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* the VSI passed in is already the default VSI */
+ if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
+ vsi->vsi_num);
+ return 0;
+ }
+
+ /* another VSI is already the default VSI for this switch */
+ if (ice_is_dflt_vsi_in_use(sw)) {
+ dev_err(dev,
+ "Default forwarding VSI %d already in use, disable it and try again\n",
+ sw->dflt_vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev,
+ "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = vsi;
+ sw->dflt_vsi_ena = true;
+
+ return 0;
+}
+
+/**
+ * ice_clear_dflt_vsi - clear the default forwarding VSI
+ * @sw: switch used to clear the default VSI
+ *
+ * If the switch has no default VSI or it's not enabled then return an error.
+ *
+ * Otherwise try to clear the default VSI and return the result.
+ */
+int ice_clear_dflt_vsi(struct ice_sw *sw)
+{
+ struct ice_vsi *dflt_vsi;
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(sw->pf);
+
+ dflt_vsi = sw->dflt_vsi;
+
+ /* there is no default VSI configured */
+ if (!ice_is_dflt_vsi_in_use(sw))
+ return -ENODEV;
+
+ status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev,
+ "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = NULL;
+ sw->dflt_vsi_ena = false;
+
+ return 0;
+}
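A hypothetical caller sketch showing the intended pairing of these helpers: claim the switch's single default-VSI slot when entering promiscuous mode and release it on exit. It mirrors the ice_vsi_sync_fltr() changes in ice_main.c further down, but is illustrative only:

    static int sketch_toggle_promisc(struct ice_sw *sw, struct ice_vsi *vsi,
                                     bool enter)
    {
            if (enter) {
                    if (ice_is_dflt_vsi_in_use(sw))
                            return -EEXIST; /* another VSI owns the slot */
                    return ice_set_dflt_vsi(sw, vsi);
            }
            if (ice_is_vsi_dflt_vsi(sw, vsi))
                    return ice_clear_dflt_vsi(sw);
            return 0;
    }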
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e31e30aba39..68fd0d4505c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -103,4 +103,12 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
+
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
+
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_clear_dflt_vsi(struct ice_sw *sw);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 69bff085acf7..5ae671609f98 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,7 @@
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
@@ -379,25 +379,29 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
+ err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (err && err != -EEXIST) {
+ netdev_err(netdev,
+ "Error %d setting default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags &=
+ ~IFF_PROMISC;
+ goto out_promisc;
+ }
}
} else {
/* Clear Rx filter to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
+ err = ice_clear_dflt_vsi(pf->first_sw);
+ if (err) {
+ netdev_err(netdev,
+ "Error %d clearing default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags |=
+ IFF_PROMISC;
+ goto out_promisc;
+ }
}
}
}
@@ -472,7 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */
@@ -840,8 +844,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
- if (pf->num_alloc_vfs)
- ice_vc_notify_link_state(pf);
+ ice_vc_notify_link_state(pf);
return result;
}
@@ -1291,7 +1294,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/* check to see if one of the VFs caused the MDD */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
@@ -2330,7 +2333,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO;
+ tso_features = NETIF_F_TSO |
+ NETIF_F_GSO_UDP_L4;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
@@ -3568,6 +3572,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
/* required last entry */
{ 0, }
};
@@ -4670,6 +4683,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ if (pf->first_sw->dflt_vsi_ena)
+ dev_info(dev,
+ "Clearing default VSI, re-enable after reset completes\n");
+ /* clear the default VSI configuration if it exists */
+ pf->first_sw->dflt_vsi = NULL;
+ pf->first_sw->dflt_vsi_ena = false;
+
ice_clear_pxe_mode(hw);
ret = ice_get_caps(hw);
@@ -4825,7 +4845,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- netdev_info(netdev, "changed MTU to %d\n", new_mtu);
+ netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
@@ -5060,42 +5080,23 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void ice_tx_timeout(struct net_device *netdev)
+static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int hung_queue = -1;
u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way dev_watchdog() does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- unsigned long trans_start;
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- trans_start + netdev->watchdog_timeo)) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues)
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- else
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- if (hung_queue == vsi->tx_rings[i]->q_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (txqueue == vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -5110,19 +5111,19 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
- vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
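This refactor appears to track the net core change that passes the hung queue index directly into .ndo_tx_timeout, so drivers no longer rescan netdev_get_tx_queue() themselves. A hypothetical minimal handler with the new signature:

    static void sketch_tx_timeout(struct net_device *netdev,
                                  unsigned int txqueue)
    {
            netdev_warn(netdev, "Tx queue %u hung, scheduling recovery\n",
                        txqueue);
            /* driver-specific reset/recovery would be kicked off here */
    }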
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 57c73f613f32..7525ac50742e 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -289,6 +289,18 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+ /* the following devices do not have boot_cfg_tlv yet */
+ if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822C_QSFP ||
+ hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822C_SGMII ||
+ hw->device_id == ICE_DEV_ID_E822C_SFP ||
+ hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822L_SFP ||
+ hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822L_SGMII)
+ return status;
+
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
new file mode 100644
index 000000000000..71647566964e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_PROTOCOL_TYPE_H_
+#define _ICE_PROTOCOL_TYPE_H_
+/* Decoders for ice_prot_id:
+ * - F: First
+ * - I: Inner
+ * - L: Last
+ * - O: Outer
+ * - S: Single
+ */
+enum ice_prot_id {
+ ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_IPV4_OF_OR_S = 32,
+ ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV6_OF_OR_S = 40,
+ ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
+ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
+};
+#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index c01597885629..a9a8bc3aca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_HW_TABLE = -19,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b5a53f862a83..431266081a80 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -50,42 +50,6 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/**
- * ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
- * @buf: Indirect buffer to hold data parameters and response
- * @buf_size: size of buffer for indirect commands
- * @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
- *
- * Helper function to allocate/free resources using the admin queue commands
- */
-static enum ice_status
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
-{
- struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.sw_res_ctrl;
-
- if (!buf)
- return ICE_ERR_PARAM;
-
- if (buf_size < (num_entries * sizeof(buf->elem[0])))
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd->num_entries = cpu_to_le16(num_entries);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 2c212f64d99f..fd17ace6b226 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1071,13 +1071,16 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ice_put_rx_buf(rx_ring, rx_buf);
continue;
construct_skb:
- if (skb)
+ if (skb) {
ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- else if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
+ } else if (likely(xdp.data)) {
+ if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+ } else {
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
-
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
@@ -1925,6 +1928,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
@@ -1958,10 +1962,18 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* remove payload length from checksum */
paylen = skb->len - l4_start;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of UDP segmentation header (a fixed 8 bytes;
+ * dereference so sizeof() measures the header, not the pointer)
+ */
+ off->header_len = sizeof(*l4.udp) + l4_start;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of TCP segmentation header */
+ off->header_len = (l4.tcp->doff * 4) + l4_start;
+ }
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
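The header_len arithmetic differs between the two GSO flavors: the UDP header is a fixed 8 bytes (hence the sizeof of the dereferenced udphdr above), while the TCP header length comes from doff, counted in 32-bit words. A standalone sketch with example offsets:

    #include <stdio.h>

    int main(void)
    {
            unsigned int l4_start = 14 + 20; /* Ethernet + IPv4, no options */
            unsigned int tcp_doff = 5;       /* 5 * 4 = 20-byte TCP header */

            printf("UDP GSO header_len: %u\n", 8 + l4_start);
            printf("TCP GSO header_len: %u\n", tcp_doff * 4 + l4_start);
            return 0;
    }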
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a84cc0e6dd27..a86270696df1 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -341,6 +341,12 @@ struct ice_ring_container {
u16 itr_setting;
};
+struct ice_coalesce_stored {
+ u16 itr_tx;
+ u16 itr_rx;
+ u8 intrl;
+};
+
/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index c4854a987130..b361ffabb0ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -13,6 +13,7 @@
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -41,6 +42,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
+#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_PKG BIT_ULL(16)
@@ -559,6 +561,10 @@ struct ice_hw {
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
+ struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
+ struct list_head fl_profs[ICE_BLK_COUNT];
+ struct mutex rss_locks; /* protect RSS configuration */
+ struct list_head rss_list_head;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index edb374296d1f..82b1e7a4cb92 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -35,37 +35,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -78,10 +47,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -121,26 +91,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
- * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @link_up: whether or not to set the link up/down
- */
-static void
-ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
- bool link_up)
-{
- u16 link_speed;
-
- if (link_up)
- link_speed = ICE_AQ_LINK_SPEED_100GB;
- else
- link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, pfe, link_speed, link_up);
-}
-
-/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -160,13 +110,17 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
pfe.severity = PF_EVENT_SEVERITY_INFO;
/* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena)
+ if (!vf->num_qs_ena) {
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- else if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
- ICE_AQ_LINK_UP);
+ } else if (vf->link_forced) {
+ u16 link_speed = vf->link_up ?
+ ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
+ } else {
+ ice_set_pfe_link(vf, &pfe, ls->link_speed,
+ ls->link_info & ICE_AQ_LINK_UP);
+ }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -331,7 +285,7 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
ice_dis_vf_qs(&pf->vf[i]);
@@ -991,10 +945,17 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
+ struct ice_vsi *vsi;
+
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- vf->num_vlan = 0;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf),
+ "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
+ vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -1023,7 +984,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
struct ice_hw *hw;
hw = &pf->hw;
- if (vf->num_vlan) {
+ if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
} else if (vf->port_vlan_id) {
@@ -1070,7 +1031,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
@@ -1113,10 +1074,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vsi *vsi;
vf = &pf->vf[v];
@@ -1161,7 +1122,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
ice_free_vf_res(vf);
@@ -1273,7 +1234,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vf->num_vlan)
+ if (vf->port_vlan_id || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1301,7 +1262,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf)
{
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_vc_notify_vf_link_state(&pf->vf[i]);
}
@@ -1385,9 +1346,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_pci_disable_sriov;
}
pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
/* apply default profile */
- for (i = 0; i < num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
vfs[i].pf = pf;
vfs[i].vf_sw_id = pf->first_sw;
vfs[i].vf_id = i;
@@ -1396,7 +1358,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
}
- pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) {
@@ -1535,7 +1496,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
!pf->num_alloc_vfs)
return;
- for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1918,6 +1879,89 @@ error_param:
}
/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi_ctx *ctx;
+ struct ice_vsi *vf_vsi;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ return -EINVAL;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev,
+ "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ return -ENODEV;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vf_vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ if (ena) {
+ ctx->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctx->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
+
+ status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev,
+ "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* only update spoofchk state and VSI context on success */
+ vf_vsi->info.sec_flags = ctx->info.sec_flags;
+ vf->spoofchk = ena;
+
+out:
+ kfree(ctx);
+ return ret;
+}
+
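The enable and disable branches above set and clear the same two-bit mask, so the logic reduces to flags |= mask and flags &= ~mask. A standalone sketch of the bit math; the bit values are assumptions for illustration, not copied from ice_adminq_cmd.h:

    #include <stdint.h>
    #include <stdio.h>

    #define SEC_FLAG_ENA_MAC_ANTI_SPOOF 0x04 /* assumed bit value */
    #define SEC_TX_VLAN_PRUNE_ENA       0x01 /* assumed bit value */
    #define SEC_TX_PRUNE_ENA_S          4    /* assumed shift */

    int main(void)
    {
            uint8_t mask = SEC_FLAG_ENA_MAC_ANTI_SPOOF |
                           (SEC_TX_VLAN_PRUNE_ENA << SEC_TX_PRUNE_ENA_S);
            uint8_t flags = 0;

            flags |= mask;  /* spoofchk on */
            printf("on:  0x%02x\n", flags);
            flags &= ~mask; /* spoofchk off */
            printf("off: 0x%02x\n", flags);
            return 0;
    }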
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2409,6 +2453,83 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
}
/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to add
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ /* default unicast MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+ dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -EEXIST;
+ } else if (status) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ /* only set dflt_lan_addr once */
+ if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
+ is_unicast_ether_addr(mac_addr))
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
+
+ vf->num_mac++;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to delete
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ if (!ice_can_vf_change_mac(vf) &&
+ ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -ENOENT;
+ } else if (status) {
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ eth_zero_addr(vf->dflt_lan_addr.addr);
+
+ vf->num_mac--;
+
+ return 0;
+}
+
+/**
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2419,23 +2540,23 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ int (*ice_vc_cfg_mac)
+ (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status status;
struct ice_vsi *vsi;
- struct device *dev;
- int mac_count = 0;
int i;
- dev = ice_pf_to_dev(pf);
-
- if (set)
+ if (set) {
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- else
+ ice_vc_cfg_mac = ice_vc_add_mac_addr;
+ } else {
vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_del_mac_addr;
+ }
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
@@ -2443,14 +2564,15 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
+ /* If this VF is not privileged, then we can't add more than a
+ * limited number of addresses. Check to make sure that the
+ * additions do not push us over the limit.
+ */
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(dev,
+ dev_err(ice_pf_to_dev(pf),
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
- /* There is no need to let VF know about not being trusted
- * to add more MAC addr, so we can just return success message.
- */
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
@@ -2462,70 +2584,22 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
}
for (i = 0; i < al->num_elements; i++) {
- u8 *maddr = al->list[i].addr;
+ u8 *mac_addr = al->list[i].addr;
+ int result;
- if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
- is_broadcast_ether_addr(maddr)) {
- if (set) {
- /* VF is trying to add filters that the PF
- * already added. Just continue.
- */
- dev_info(dev,
- "MAC %pM already set for VF %d\n",
- maddr, vf->vf_id);
- continue;
- } else {
- /* VF can't remove dflt_lan_addr/bcast MAC */
- dev_err(dev,
- "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
- maddr, vf->vf_id);
- continue;
- }
- }
-
- /* check for the invalid cases and bail if necessary */
- if (is_zero_ether_addr(maddr)) {
- dev_err(dev,
- "invalid MAC %pM provided for VF %d\n",
- maddr, vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- if (is_unicast_ether_addr(maddr) &&
- !ice_can_vf_change_mac(vf)) {
- dev_err(dev,
- "can't change unicast MAC for untrusted VF %d\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
+ if (is_broadcast_ether_addr(mac_addr) ||
+ is_zero_ether_addr(mac_addr))
+ continue;
- /* program the updated filter list */
- status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
- if (status == ICE_ERR_DOES_NOT_EXIST ||
- status == ICE_ERR_ALREADY_EXISTS) {
- dev_info(dev,
- "can't %s MAC filters %pM for VF %d, error %d\n",
- set ? "add" : "remove", maddr, vf->vf_id,
- status);
- } else if (status) {
- dev_err(dev,
- "can't %s MAC filters for VF %d, error %d\n",
- set ? "add" : "remove", vf->vf_id, status);
- v_ret = ice_err_to_virt_err(status);
+ result = ice_vc_cfg_mac(vf, vsi, mac_addr);
+ if (result == -EEXIST || result == -ENOENT) {
+ continue;
+ } else if (result) {
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto handle_mac_exit;
}
-
- mac_count++;
}
- /* Track number of MAC filters programmed for the VF VSI */
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
-
handle_mac_exit:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
@@ -2744,17 +2818,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (add_v && !ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being not trusted,
- * so we can just return success message here
- */
- goto error_param;
- }
-
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2771,6 +2834,17 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(dev,
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being not trusted,
+ * so we can just return success message here
+ */
+ goto error_param;
+ }
+
if (vsi->info.pvid) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2785,7 +2859,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
u16 vid = vfl->vlan_id[i];
if (!ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
@@ -2796,12 +2870,20 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (ice_vsi_add_vlan(vsi, vid)) {
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't need to add it again here
+ */
+ if (!vid)
+ continue;
+
+ status = ice_vsi_add_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan++;
+ vsi->num_vlan++;
/* Enable VLAN pruning when VLAN is added */
if (!vlan_promisc) {
status = ice_cfg_vlan_pruning(vsi, true, false);
@@ -2837,21 +2919,29 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
int num_vf_vlan;
- num_vf_vlan = vf->num_vlan;
+ num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't want a VIRTCHNL request to remove it
+ */
+ if (!vid)
+ continue;
+
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (ice_vsi_kill_vlan(vsi, vid)) {
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan--;
+ vsi->num_vlan--;
/* Disable VLAN pruning when the last VLAN is removed */
- if (!vf->num_vlan)
+ if (!vsi->num_vlan)
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3165,65 +3255,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi = pf->vsi[0];
- struct ice_vsi_ctx *ctx;
- enum ice_status status;
- struct device *dev;
- struct ice_vf *vf;
- int ret = 0;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n",
- ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
-
- if (ena) {
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
- ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
- }
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_dbg(dev,
- "Error %d, failed to update VSI* parameters\n", status);
- ret = -EIO;
- goto out;
- }
-
- vf->spoofchk = ena;
- vsi->info.sec_flags = ctx->info.sec_flags;
- vsi->info.sw_flags2 = ctx->info.sw_flags2;
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
* ice_wait_on_vf_reset
* @vf: The VF being reset
*
@@ -3344,28 +3375,18 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
struct ice_vf *vf;
- struct ice_hw *hw;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- hw = &pf->hw;
- ls = &pf->hw.port_info->phy.link_info;
-
if (ice_check_vf_init(pf, vf))
return -EBUSY;
- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = PF_EVENT_SEVERITY_INFO;
-
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
- vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
@@ -3379,15 +3400,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return -EINVAL;
}
- if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
-
- /* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
- sizeof(pfe), NULL);
+ ice_vc_notify_vf_link_state(vf);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 88aa65d5cb31..4647d636ed36 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -40,6 +40,9 @@
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15
+#define ice_for_each_vf(pf, i) \
+ for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
+
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
@@ -91,7 +94,6 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
- u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
};
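/* A minimal usage sketch for the new ice_for_each_vf() iterator, relying
 * only on the pf->num_alloc_vfs bound and pf->vf[] array the macro itself
 * implies; the loop body is illustrative, not taken from the patch:
 *
 *	int i;
 *
 *	ice_for_each_vf(pf, i) {
 *		struct ice_vf *vf = &pf->vf[i];
 *
 *		dev_dbg(ice_pf_to_dev(pf), "VF %d: %u MAC filters\n",
 *			vf->vf_id, vf->num_mac);
 *	}
 */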
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index cf9b8b22d24f..149dca0012ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -414,7 +414,8 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (!vsi->num_xsk_umems)
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
@@ -555,7 +556,7 @@ ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -591,7 +592,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
@@ -1019,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
s16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- bool xmit_done = true;
u32 xsk_frames = 0;
+ bool xmit_done;
tx_desc = ICE_TX_DESC(xdp_ring, ntc);
tx_buf = &xdp_ring->tx_buf[ntc];
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8a6ef3514129..438b42ce2cd9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..49b5fa9d4783 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 4690d6c87f39..f96ffa83efbe 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int retval = 0;
+ int i;
/* 100basefx does not support setting link flow control */
if (hw->dev_spec._82575.eth_flags.e100_base_fx)
@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev,
retval = ((hw->phy.media_type == e1000_media_type_copper) ?
igb_force_mac_fc(hw) : igb_setup_link(hw));
+
+ /* Make sure SRRCTL considers new fc settings for each ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ igb_setup_srrctl(adapter, ring);
+ }
}
clear_bit(__IGB_RESETTING, &adapter->state);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 98346eb064d5..b46bff8fe056 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
netdev_features_t features);
@@ -4468,6 +4468,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
}
/**
+ * igb_setup_srrctl - configure the split and replication receive control
+ * registers
+ * @adapter: Board private structure
+ * @ring: receive ring to be configured
+ **/
+void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+ /* Only set Drop Enable if VFs allocated, or we are supporting multiple
+ * queues and rx flow control is disabled
+ */
+ if (adapter->vfs_allocated_count ||
+ (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
+ adapter->num_rx_queues > 1))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ wr32(E1000_SRRCTL(reg_idx), srrctl);
+}
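+/* A worked sketch of the drop-enable rule above (illustrative
+ * combinations, not measured behavior):
+ *
+ *	vfs_allocated_count	Rx pause	num_rx_queues	DROP_EN
+ *	0			on		4		off
+ *	0			off		4		on
+ *	2			on		4		on
+ *	0			off		1		off
+ */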
+
+/**
* igb_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure
* @ring: receive ring to be configured
@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
- u32 srrctl = 0, rxdctl = 0;
+ u32 rxdctl = 0;
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
writel(0, ring->tail);
/* set descriptor configuration */
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (hw->mac.type >= e1000_82580)
- srrctl |= E1000_SRRCTL_TIMESTAMP;
- /* Only set Drop Enable if we are supporting multiple queues */
- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
- srrctl |= E1000_SRRCTL_DROP_EN;
-
- wr32(E1000_SRRCTL(reg_idx), srrctl);
+ igb_setup_srrctl(adapter, ring);
/* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true);
@@ -6184,7 +6203,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igb_tx_timeout(struct net_device *netdev)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 6003dc3ff5fd..5b1800c3ba82 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2375,7 +2375,7 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igbvf_tx_timeout(struct net_device *netdev)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 88c6f88baac5..49fb1e1965cd 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o
+igc_ethtool.o igc_ptp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 0868677d43ed..52066bdbbad0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -10,6 +10,9 @@
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
#include "igc_hw.h"
@@ -45,11 +48,15 @@ extern char igc_driver_version[];
#define IGC_REGS_LEN 740
#define IGC_RETA_SIZE 128
+/* flags controlling PTP/1588 function */
+#define IGC_PTP_ENABLED BIT(0)
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
+#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -100,6 +107,20 @@ extern char igc_driver_version[];
#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+/* Transmit and receive latency (for PTP timestamps) */
+/* FIXME: These values were estimated using the ones that i210 has as
+ * basis, they seem to provide good numbers with ptp4l/phc2sys, but we
+ * need to confirm them.
+ */
+#define IGC_I225_TX_LATENCY_10 9542
+#define IGC_I225_TX_LATENCY_100 1024
+#define IGC_I225_TX_LATENCY_1000 178
+#define IGC_I225_TX_LATENCY_2500 64
+#define IGC_I225_RX_LATENCY_10 20662
+#define IGC_I225_RX_LATENCY_100 2213
+#define IGC_I225_RX_LATENCY_1000 448
+#define IGC_I225_RX_LATENCY_2500 160
+
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
@@ -370,6 +391,8 @@ struct igc_adapter {
struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
+ u32 wol;
+ u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
@@ -430,6 +453,20 @@ struct igc_adapter {
unsigned long link_check_timeout;
struct igc_info ei;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
};
/* igc_desc_unused - calculate if we have unused descriptors */
@@ -513,6 +550,16 @@ int igc_add_filter(struct igc_adapter *adapter,
int igc_erase_filter(struct igc_adapter *adapter,
struct igc_nfc_filter *input);
+void igc_ptp_init(struct igc_adapter *adapter);
+void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_stop(struct igc_adapter *adapter);
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb);
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igc_ptp_tx_hang(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index db289bcce21d..5a506440560a 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3788f0b95b4..58efa7a02c68 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -10,6 +10,37 @@
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+
+#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+
+/* Wake Up Status */
+#define IGC_WUS_EX 0x00000004 /* Directed Exact */
+#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ IGC_WUS_EX | \
+ IGC_WUS_ARPD | \
+ IGC_WUS_IPV4 | \
+ IGC_WUS_IPV6 | \
+ IGC_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define IGC_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define IGC_WUPM_BYTES 128
+
/* Physical Func Reset Done Indication */
#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -187,6 +218,7 @@
#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
+#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
@@ -209,6 +241,7 @@
#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */
#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
@@ -249,6 +282,10 @@
#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+/* IPSec Encrypt Enable */
+#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
@@ -281,12 +318,21 @@
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP 0x40000000
+#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
+
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+/* Advanced Receive Descriptor bit definitions */
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
#define IGC_RXDEXT_STATERR_SEQ 0x04000000
@@ -323,6 +369,61 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+
+/* Time Sync Interrupt Causes */
+#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
+#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+
+#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
+
+/* PTP Queue Filter */
+#define IGC_ETQF_1588 BIT(30)
+
+#define IGC_FTQF_VF_BP 0x00008000
+#define IGC_FTQF_1588_TIME_STAMP 0x08000000
+#define IGC_FTQF_MASK 0xF0000000
+#define IGC_FTQF_MASK_PROTO_BP 0x10000000
+
+/* Time Sync Receive Control bit definitions */
+#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IGC_TSYNCRXCTL_TYPE_ALL 0x08
+#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */
+
+/* Time Sync Receive Configuration */
+#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+
+/* Immediate Interrupt Receive */
+#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+/* Immediate Interrupt Receive Extended */
+#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+
+/* Time Sync Transmit Control bit definitions */
+#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
@@ -363,6 +464,7 @@
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 455c1cdceb6e..ee07011e13e9 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1600,6 +1600,39 @@ static int igc_set_channels(struct net_device *netdev,
return 0;
}
+static int igc_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static u32 igc_get_priv_flags(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1847,6 +1880,7 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
+ .get_ts_info = igc_get_ts_info,
.get_channels = igc_get_channels,
.set_channels = igc_set_channels,
.get_priv_flags = igc_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 20f710645746..90ac0e0144d8 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,8 +21,7 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
-
-#define IGC_FUNC_0 0
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
struct igc_mac_operations {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9700527dd797..d9d5425fe8d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -8,6 +8,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
+#include <linux/pm_runtime.h>
#include <net/ipv6.h>
@@ -44,31 +45,13 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
-/* forward declaration */
-static void igc_clean_tx_ring(struct igc_ring *tx_ring);
-static int igc_sw_init(struct igc_adapter *);
-static void igc_configure(struct igc_adapter *adapter);
-static void igc_power_down_link(struct igc_adapter *adapter);
-static void igc_set_default_mac_filter(struct igc_adapter *adapter);
-static void igc_set_rx_mode(struct net_device *netdev);
-static void igc_write_itr(struct igc_q_vector *q_vector);
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix);
-static void igc_free_q_vectors(struct igc_adapter *adapter);
-static void igc_irq_disable(struct igc_adapter *adapter);
-static void igc_irq_enable(struct igc_adapter *adapter);
-static void igc_configure_msix(struct igc_adapter *adapter);
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi);
-
enum latency_range {
lowest_latency = 0,
low_latency = 1,
@@ -76,6 +59,16 @@ enum latency_range {
latency_invalid = 255
};
+/**
+ * igc_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_down_link(struct igc_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_down_phy_copper_base(&adapter->hw);
+}
+
void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -110,11 +103,14 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_link(adapter);
+ /* Re-enable PTP, where applicable. */
+ igc_ptp_reset(adapter);
+
igc_get_phy_info(hw);
}
/**
- * igc_power_up_link - Power up the phy/serdes link
+ * igc_power_up_link - Power up the phy link
* @adapter: address of board private structure
*/
static void igc_power_up_link(struct igc_adapter *adapter)
@@ -128,16 +124,6 @@ static void igc_power_up_link(struct igc_adapter *adapter)
}
/**
- * igc_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igc_power_down_link(struct igc_adapter *adapter)
-{
- if (adapter->hw.phy.media_type == igc_media_type_copper)
- igc_power_down_phy_copper_base(&adapter->hw);
-}
-
-/**
* igc_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
@@ -176,43 +162,6 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
}
/**
- * igc_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- */
-void igc_free_tx_resources(struct igc_ring *tx_ring)
-{
- igc_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!tx_ring->desc)
- return;
-
- dma_free_coherent(tx_ring->dev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * igc_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- */
-static void igc_free_all_tx_resources(struct igc_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igc_free_tx_resources(adapter->tx_ring[i]);
-}
-
-/**
* igc_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
*/
@@ -274,6 +223,43 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
}
/**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+ igc_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
* igc_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
*/
@@ -771,6 +757,51 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
+ * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: address of board private structure
+ * @index: Index of the RAR entry which needs to be synced with the MAC table
+ */
+static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+{
+ u8 *addr = adapter->mac_table[index].addr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do an leXX_to_cpup here in order to be
+ * ready for the byteswap that occurs with writel
+ */
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
+ if (is_valid_ether_addr(addr))
+ rar_high |= IGC_RAH_AV;
+
+ rar_high |= IGC_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
+
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
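+/* Byte-order sketch for the RAL/RAH writes above, using the illustrative
+ * address 00:11:22:33:44:55: le32_to_cpup() over the first four bytes
+ * yields rar_low = 0x33221100 and le16_to_cpup() over the last two yields
+ * rar_high = 0x5544, so the little-endian stores done by writel() put the
+ * bytes back on the bus in 00 11 22 33 44 55 order.
+ */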
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igc_set_default_mac_filter(struct igc_adapter *adapter)
+{
+ struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, 0);
+}
+
+/**
* igc_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -850,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
- /* For 82575, context index must be unique per ring. */
+ /* For i225, context index must be unique per ring. */
if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4;
@@ -957,6 +988,11 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
return __igc_maybe_stop_tx(tx_ring, size);
}
+#define IGC_SET_FLAG(_input, _flag, _result) \
+ (((_flag) <= (_result)) ? \
+ ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
+ ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
+
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
@@ -964,6 +1000,14 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set segmentation bits for TSO */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
+ (IGC_ADVTXD_DCMD_TSE));
+
+ /* set timestamp bit if present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+
return cmd_type;
}
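/* How IGC_SET_FLAG() maps a tx_flags bit onto a descriptor bit without a
 * branch: the masked input bit is scaled by the ratio of the two masks.
 * With illustrative masks (not the driver's actual values) _flag = BIT(2)
 * and _result = BIT(31), _flag <= _result holds, so the macro computes
 * (_input & BIT(2)) * (BIT(31) / BIT(2)), i.e. a left shift by 29 bits;
 * when _flag > _result it divides instead, giving a right shift.
 */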
@@ -1131,6 +1175,100 @@ dma_error:
return -1;
}
+static int igc_tso(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
+ type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM |
+ IGC_TX_FLAGS_IPV4;
+ } else {
+ ip.v6->payload_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM;
+ }
+
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
+
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+ igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
+}
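+/* A worked pass through the length fix-up above, with illustrative
+ * numbers: for a 1514-byte IPv4/TCP frame, l4_offset = 14 + 20 = 34 and
+ * paylen = 1514 - 34 = 1480. The pseudo-header checksum seeded by the
+ * stack includes that full length, so csum_replace_by_diff() subtracts
+ * htonl(1480), leaving a checksum the hardware can complete with each
+ * TSO segment's own length.
+ */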
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1140,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u8 hdr_len = 0;
+ int tso = 0;
/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
@@ -1162,15 +1301,45 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ /* FIXME: add support for retrieving timestamps from
+ * the other timer registers before skipping the
+ * timestamping request.
+ */
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGC_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
- igc_tx_csum(tx_ring, first);
+ tso = igc_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ igc_tx_csum(tx_ring, first);
igc_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
@@ -1269,6 +1438,10 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
+ !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
+ igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1388,6 +1561,12 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
+ igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
@@ -1485,7 +1664,6 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* igc_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1565,9 +1743,56 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
rx_buffer->page = NULL;
}
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = igc_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
+
+ return true;
+}
+
/**
* igc_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring
+ * @cleaned_count: number of buffers to replace
*/
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
@@ -1725,52 +1950,6 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
-}
-
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
- /* alloc new page for storage */
- page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- igc_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IGC_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = igc_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
/**
* igc_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -1942,6 +2121,1128 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
return !!budget;
}
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to just
+ * override the other configuration, for example the queue used to
+ * steer traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * The match is made on the destination address; this variant passes no
+ * IGC_MAC_STATE_SRC_ADDR flag to igc_mac_entry_can_be_used(), so
+ * source-address matching is never requested here.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/* Remove the MAC filter for 'addr' that directs matching traffic to
+ * 'queue'. The match is made on the destination address and queue; a
+ * filter for the default address is returned to its initial
+ * configuration instead of being cleared.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
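+/* A minimal usage sketch for the two helpers above; 'queue' is
+ * illustrative (igc_uc_sync() and igc_uc_unsync() below pass
+ * adapter->num_rx_queues). igc_add_mac_filter() returns the RAR index
+ * used, -EINVAL for a zero address, or -ENOSPC when the table is full:
+ *
+ *	int idx = igc_add_mac_filter(adapter, addr, queue);
+ *
+ *	if (idx < 0)
+ *		return idx;
+ *	...
+ *	igc_del_mac_filter(adapter, addr, queue);
+ */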
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
+/**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA; if the attempt fails,
+ * just turn on multicast promiscuous mode so that
+ * we can at least receive multicast traffic
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers; if there is not
+ * sufficient space to store all the addresses, enable
+ * unicast promiscuous mode
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
+}
+
+/**
+ * igc_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ */
+static void igc_configure(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i = 0;
+
+ igc_get_hw_control(adapter);
+ igc_set_rx_mode(netdev);
+
+ igc_setup_tctl(adapter);
+ igc_setup_mrqc(adapter);
+ igc_setup_rctl(adapter);
+
+ igc_nfc_filter_restore(adapter);
+ igc_configure_tx(adapter);
+ igc_configure_rx(adapter);
+
+ igc_rx_fifo_flush_base(&adapter->hw);
+
+ /* call igc_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+}
+
+/**
+ * igc_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset within IVAR, should be a multiple of 8
+ *
+ * The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and a Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(IGC_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+ array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ int rx_queue = IGC_N0_QUEUE;
+ int tx_queue = IGC_N0_QUEUE;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ if (rx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
+ q_vector->eims_value = BIT(msix_vector);
+ break;
+ default:
+ WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
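+/* Worked example of the IVAR mapping above: for rx_queue = 3 the row is
+ * 3 >> 1 = 1 and the column offset is (3 & 0x1) << 4 = 16; the paired
+ * tx_queue = 3 lands at offset 16 + 8 = 24 in the same row. Each 8-bit
+ * field holds the MSI-X vector number ORed with IGC_IVAR_VALID.
+ */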
+
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i, vector = 0;
+ u32 tmp;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case igc_i225:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+ wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
+ IGC_GPIE_PBA | IGC_GPIE_EIAME |
+ IGC_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = BIT(vector);
+ tmp = (vector++ | IGC_IVAR_VALID) << 8;
+
+ wr32(IGC_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igc_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+/**
+ * igc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void igc_irq_enable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
+ u32 regval = rd32(IGC_EIAC);
+
+ wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAM);
+ wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
+ wr32(IGC_EIMS, adapter->eims_enable_mask);
+ wr32(IGC_IMS, ims);
+ } else {
+ wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ }
+}
+
+/**
+ * igc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void igc_irq_disable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 regval = rd32(IGC_EIAM);
+
+ wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(IGC_EIMC, adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAC);
+ wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(IGC_IAM, 0);
+ wr32(IGC_IMC, ~0);
+ wrfl();
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ /* Determine if we need to pair queues: if rss_queues > half of
+ * max_rss_queues, pair the queues in order to conserve interrupts
+ * due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
+}
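+/* Example with illustrative values: rss_queues = 4 and max_rss_queues =
+ * 4 gives 4 > 4 / 2, so IGC_FLAG_QUEUE_PAIRS is set and each Tx/Rx pair
+ * shares one MSI-X vector; rss_queues = 2 would clear the flag and give
+ * every ring its own vector.
+ */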
+
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ max_rss_queues = IGC_MAX_RX_QUEUES;
+
+ return max_rss_queues;
+}
+
+static void igc_init_queue_configuration(struct igc_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igc_get_max_rss_queues(adapter);
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ igc_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+/**
+ * igc_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igc_free_q_vector.
+ */
+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* if we're coming from igc_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ netif_napi_del(&q_vector->napi);
+}
+
+/**
+ * igc_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ */
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igc_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igc_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ */
+static void igc_free_q_vectors(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--) {
+ igc_reset_q_vector(adapter, v_idx);
+ igc_free_q_vector(adapter, v_idx);
+ }
+}
+
+/**
+ * igc_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ */
+static void igc_update_itr(struct igc_q_vector *q_vector,
+ struct igc_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ itrval = bulk_latency;
+ else if (packets > 35)
+ itrval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
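
The switch above is a three-state machine keyed on the previous ITR class. A freestanding userspace restatement, convenient for eyeballing individual transitions (the enum ordering mirrors the lowest_latency/low_latency/bulk_latency values the driver assumes):

    #include <stdio.h>

    enum { lowest_latency, low_latency, bulk_latency };

    /* Same transition rules as igc_update_itr, state passed in. */
    static int next_itr(int itr, unsigned int packets, unsigned int bytes)
    {
        if (packets == 0)
            return itr;

        switch (itr) {
        case lowest_latency:
            if (bytes / packets > 8000)
                itr = bulk_latency;
            else if (packets < 5 && bytes > 512)
                itr = low_latency;
            break;
        case low_latency:
            if (bytes > 10000) {
                if (bytes / packets > 8000 || packets < 10 ||
                    bytes / packets > 1200)
                    itr = bulk_latency;
                else if (packets > 35)
                    itr = lowest_latency;
            } else if (bytes / packets > 2000) {
                itr = bulk_latency;
            } else if (packets <= 2 && bytes < 512) {
                itr = lowest_latency;
            }
            break;
        case bulk_latency:
            if ((bytes > 25000 && packets > 35) || bytes < 1500)
                itr = low_latency;
            break;
        }
        return itr;
    }

    int main(void)
    {
        /* bulk transfer: large packets push low_latency -> bulk_latency */
        printf("%d\n", next_itr(low_latency, 10, 90000)); /* 2 */
        /* sparse small packets: low_latency -> lowest_latency */
        printf("%d\n", next_itr(low_latency, 2, 256));    /* 0 */
        return 0;
    }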
+
+static void igc_set_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGC_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igc_update_itr(q_vector, &q_vector->tx);
+ igc_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igc_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ * igc_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean value for MSI-X capability
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix)
+{
+ int numvecs, i;
+ int err;
+
+ if (!msix)
+ goto msi_only;
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every Rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if Tx handler is separate add 1 for every Tx queue */
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
+
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries)
+ return;
+
+ /* populate entry values */
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
+ return;
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ igc_reset_interrupt_capability(adapter);
+
+msi_only:
+ adapter->flags &= ~IGC_FLAG_HAS_MSIX;
+
+ adapter->rss_queues = 1;
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGC_FLAG_HAS_MSI;
+}
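
The vector budget this function requests is therefore rss_queues vectors (doubled when Tx handlers are not paired with Rx) plus one for link and other causes. A quick check of that arithmetic (illustrative userspace sketch):

    #include <stdio.h>

    static int numvecs(int rss_queues, int queue_pairs)
    {
        int n = rss_queues;          /* one vector per Rx queue */

        if (!queue_pairs)
            n += rss_queues;         /* separate Tx vectors */
        return n + 1;                /* plus one for link status */
    }

    int main(void)
    {
        printf("paired:   %d vectors\n", numvecs(4, 1)); /* 5 */
        printf("unpaired: %d vectors\n", numvecs(4, 0)); /* 9 */
        return 0;
    }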
+
+/**
+ * igc_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igc_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ */
+static void igc_update_ring_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGC_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if (avg_wire_size > 300 && avg_wire_size < 1200)
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGC_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGC_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
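
A worked instance of the sizing heuristic above, for a ring that saw roughly 64-byte frames (standalone arithmetic only; the result is the raw interval value before the conservative-mode clamp):

    #include <stdio.h>

    int main(void)
    {
        unsigned int packets = 1000, bytes = 64000; /* ~64-byte frames */
        int avg = bytes / packets + 24;  /* + CRC, preamble, and gap */

        if (avg > 3000)
            avg = 3000;                  /* don't starve jumbo frames */

        int new_val = (avg > 300 && avg < 1200) ? avg / 3 : avg / 2;

        printf("avg_wire_size=%d -> itr_val=%d\n", avg, new_val); /* 88 -> 44 */
        return 0;
    }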
+
+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if (adapter->num_q_vectors == 1)
+ igc_set_itr(q_vector);
+ else
+ igc_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(IGC_EIMS, q_vector->eims_value);
+ else
+ igc_irq_enable(adapter);
+ }
+}
+
+static void igc_add_ring(struct igc_ring *ring,
+ struct igc_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset that each descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+ int i = 0, j = 0;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ /* Fall through */
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = j;
+ break;
+ }
+}
+
+/**
+ * igc_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ */
+static int igc_poll(struct napi_struct *napi, int budget)
+{
+ struct igc_q_vector *q_vector = container_of(napi,
+ struct igc_q_vector,
+ napi);
+ bool clean_complete = true;
+ int work_done = 0;
+
+ if (q_vector->tx.ring)
+ clean_complete = igc_clean_tx_irq(q_vector, budget);
+
+ if (q_vector->rx.ring) {
+ int cleaned = igc_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ if (cleaned >= budget)
+ clean_complete = false;
+ }
+
+ /* If all work is not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
+
+ return min(work_done, budget - 1);
+}
+
+/**
+ * igc_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int igc_alloc_q_vector(struct igc_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+ int ring_count;
+
+ /* igc only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ else
+ memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igc_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
+ q_vector->itr_val = IGC_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igc_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igc_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int igc_alloc_q_vectors(struct igc_adapter *adapter)
+{
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igc_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
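
For the common paired case the first loop is skipped (q_vectors < rxr + txr) and the DIV_ROUND_UP loop hands each vector one Rx and one Tx ring. A sketch of that distribution for a 4+4 configuration:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int rxr = 4, txr = 4, q_vectors = 4;

        for (int v = 0; v < q_vectors; v++) {
            int rqpv = DIV_ROUND_UP(rxr, q_vectors - v);
            int tqpv = DIV_ROUND_UP(txr, q_vectors - v);

            printf("vector %d: rx=%d tx=%d\n", v, rqpv, tqpv);
            rxr -= rqpv;
            txr -= tqpv;
        }
        return 0;
    }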
+
+/**
+ * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean for MSI-X capability
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ */
+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ igc_set_interrupt_capability(adapter, msix);
+
+ err = igc_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igc_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igc_reset_interrupt_capability(adapter);
+ return err;
+}
+
+/**
+ * igc_sw_init - Initialize general software structures (struct igc_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igc_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int igc_sw_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGC_DEFAULT_TXD;
+ adapter->rx_ring_count = IGC_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGC_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->nfc_lock);
+ spin_lock_init(&adapter->stats64_lock);
+ /* Assume MSI-X interrupts; this will be checked during IRQ allocation */
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ adapter->mac_table = kzalloc(size, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
+ igc_init_queue_configuration(adapter);
+
+ /* This call may decrease the number of queues */
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igc_irq_disable(adapter);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ return 0;
+}
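
The frame-size bounds set up here follow directly from standard Ethernet framing; for the default 1500-byte MTU they work out as below (userspace arithmetic with the usual header constants spelled out):

    #include <stdio.h>

    int main(void)
    {
        int mtu = 1500;
        int eth_hlen = 14, fcs_len = 4, vlan_hlen = 4, eth_zlen = 60;

        printf("max_frame_size=%d\n",
               mtu + eth_hlen + fcs_len + vlan_hlen); /* 1522 */
        printf("min_frame_size=%d\n", eth_zlen + fcs_len); /* 64 */
        return 0;
    }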
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2163,18 +3464,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
-static void igc_nfc_filter_restore(struct igc_adapter *adapter)
-{
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
-
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_add_filter(adapter, rule);
-
- spin_unlock(&adapter->nfc_lock);
-}
-
/**
* igc_down - Close the interface
* @adapter: board private structure
@@ -2398,105 +3687,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/**
- * igc_configure - configure the hardware for RX and TX
- * @adapter: private board structure
- */
-static void igc_configure(struct igc_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int i = 0;
-
- igc_get_hw_control(adapter);
- igc_set_rx_mode(netdev);
-
- igc_setup_tctl(adapter);
- igc_setup_mrqc(adapter);
- igc_setup_rctl(adapter);
-
- igc_nfc_filter_restore(adapter);
- igc_configure_tx(adapter);
- igc_configure_rx(adapter);
-
- igc_rx_fifo_flush_base(&adapter->hw);
-
- /* call igc_desc_unused which always leaves
- * at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean
- */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igc_ring *ring = adapter->rx_ring[i];
-
- igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
- }
-}
-
-/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
- */
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
-{
- u8 *addr = adapter->mac_table[index].addr;
- struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
-
- /* HW expects these to be in network order when they are plugged
- * into the registers which are little endian. In order to guarantee
- * that ordering we need to do an leXX_to_cpup here in order to be
- * ready for the byteswap that occurs with writel
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
-
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
-
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
- }
-
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
-}
-
-/* Set default MAC address for the PF in the first RAR entry */
-static void igc_set_default_mac_filter(struct igc_adapter *adapter)
-{
- struct igc_mac_addr *mac_table = &adapter->mac_table[0];
-
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
- mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, 0);
-}
-
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
-{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
-
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
-
- if (!ether_addr_equal(addr, entry->addr))
- return false;
-
- return true;
-}
-
/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
* 'flags' is used to indicate what kind of match is made, match is by
* default for the destination address, if matching by source address
@@ -2597,159 +3787,20 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ u32 tsicr = rd32(IGC_TSICR);
+ u32 ack = 0;
- igc_rar_set_index(adapter, i);
- return 0;
+ if (tsicr & IGC_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ ack |= IGC_TSICR_TXTS;
}
- return -ENOENT;
-}
-
-static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return min_t(int, ret, 0);
-}
-
-static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return 0;
-}
-
-/**
- * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
- */
-static void igc_set_rx_mode(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- struct igc_hw *hw = &adapter->hw;
- u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
- int count;
-
- /* Check for Promiscuous and All Multicast modes */
- if (netdev->flags & IFF_PROMISC) {
- rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= IGC_RCTL_MPE;
- } else {
- /* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- count = igc_write_mc_addr_list(netdev);
- if (count < 0)
- rctl |= IGC_RCTL_MPE;
- }
- }
-
- /* Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
- rctl |= IGC_RCTL_UPE;
-
- /* update state of unicast and multicast */
- rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
- wr32(IGC_RCTL, rctl);
-
-#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
- rlpml = IGC_MAX_FRAME_BUILD_SKB;
-#endif
- wr32(IGC_RLPML, rlpml);
+ /* acknowledge the interrupts */
+ wr32(IGC_TSICR, ack);
}
/**
@@ -2779,114 +3830,28 @@ static irqreturn_t igc_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
wr32(IGC_EIMS, adapter->eims_other);
return IRQ_HANDLED;
}
-/**
- * igc_write_ivar - configure ivar for given MSI-X vector
- * @hw: pointer to the HW structure
- * @msix_vector: vector number we are allocating to a given ring
- * @index: row index of IVAR register to write within IVAR table
- * @offset: column offset of in IVAR, should be multiple of 8
- *
- * The IVAR table consists of 2 columns,
- * each containing an cause allocation for an Rx and Tx ring, and a
- * variable number of rows depending on the number of queues supported.
- */
-static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
- int index, int offset)
-{
- u32 ivar = array_rd32(IGC_IVAR0, index);
-
- /* clear any bits that are currently set */
- ivar &= ~((u32)0xFF << offset);
-
- /* write vector and valid bit */
- ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
-
- array_wr32(IGC_IVAR0, index, ivar);
-}
-
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
- int rx_queue = IGC_N0_QUEUE;
- int tx_queue = IGC_N0_QUEUE;
-
- if (q_vector->rx.ring)
- rx_queue = q_vector->rx.ring->reg_idx;
- if (q_vector->tx.ring)
- tx_queue = q_vector->tx.ring->reg_idx;
-
- switch (hw->mac.type) {
- case igc_i225:
- if (rx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- rx_queue >> 1,
- (rx_queue & 0x1) << 4);
- if (tx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- tx_queue >> 1,
- ((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = BIT(msix_vector);
- break;
- default:
- WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
- break;
- }
-
- /* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= q_vector->eims_value;
-
- /* configure q_vector to set itr on first interrupt */
- q_vector->set_itr = 1;
-}
-
-/**
- * igc_configure_msix - Configure MSI-X hardware
- * @adapter: Pointer to adapter structure
- *
- * igc_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- */
-static void igc_configure_msix(struct igc_adapter *adapter)
+static void igc_write_itr(struct igc_q_vector *q_vector)
{
- struct igc_hw *hw = &adapter->hw;
- int i, vector = 0;
- u32 tmp;
-
- adapter->eims_enable_mask = 0;
-
- /* set vector for other causes, i.e. link changes */
- switch (hw->mac.type) {
- case igc_i225:
- /* Turn on MSI-X capability first, or our settings
- * won't stick. And it will take days to debug.
- */
- wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
- IGC_GPIE_PBA | IGC_GPIE_EIAME |
- IGC_GPIE_NSICR);
-
- /* enable msix_other interrupt */
- adapter->eims_other = BIT(vector);
- tmp = (vector++ | IGC_IVAR_VALID) << 8;
+ u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
- wr32(IGC_IVAR_MISC, tmp);
- break;
- default:
- /* do nothing, since nothing else supports MSI-X */
- break;
- } /* switch (hw->mac.type) */
+ if (!q_vector->set_itr)
+ return;
- adapter->eims_enable_mask |= adapter->eims_other;
+ if (!itr_val)
+ itr_val = IGC_ITR_VAL_MASK;
- for (i = 0; i < adapter->num_q_vectors; i++)
- igc_assign_vector(adapter->q_vector[i], vector++);
+ itr_val |= IGC_EITR_CNT_IGNR;
- wrfl();
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
}
static irqreturn_t igc_msix_ring(int irq, void *data)
@@ -2961,49 +3926,6 @@ err_out:
}
/**
- * igc_reset_q_vector - Reset config for interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be reset
- *
- * If NAPI is enabled it will delete any references to the
- * NAPI struct. This is preparation for igc_free_q_vector.
- */
-static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- /* if we're coming from igc_set_interrupt_capability, the vectors are
- * not yet allocated
- */
- if (!q_vector)
- return;
-
- if (q_vector->tx.ring)
- adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
-
- if (q_vector->rx.ring)
- adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
-
- netif_napi_del(&q_vector->napi);
-}
-
-static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-
- while (v_idx--)
- igc_reset_q_vector(adapter, v_idx);
-}
-
-/**
* igc_clear_interrupt_scheme - reset the device to a state of no interrupts
* @adapter: Pointer to adapter structure
*
@@ -3016,48 +3938,6 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
igc_reset_interrupt_capability(adapter);
}
-/**
- * igc_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- */
-static void igc_free_q_vectors(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--) {
- igc_reset_q_vector(adapter, v_idx);
- igc_free_q_vector(adapter, v_idx);
- }
-}
-
-/**
- * igc_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
- *
- * This function frees the memory allocated to the q_vector.
- */
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- adapter->q_vector[v_idx] = NULL;
-
- /* igc_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- if (q_vector)
- kfree_rcu(q_vector, rcu);
-}
-
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy
*/
@@ -3109,7 +3989,7 @@ bool igc_has_link(struct igc_adapter *adapter)
/**
* igc_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer for the watchdog
*/
static void igc_watchdog(struct timer_list *t)
{
@@ -3282,6 +4162,8 @@ no_wait:
wr32(IGC_ICS, IGC_ICS_RXDMT0);
}
+ igc_ptp_tx_hang(adapter);
+
/* Reset the timer */
if (!test_bit(__IGC_DOWN, &adapter->state)) {
if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
@@ -3294,149 +4176,6 @@ no_wait:
}
/**
- * igc_update_ring_itr - update the dynamic ITR value based on packet size
- * @q_vector: pointer to q_vector
- *
- * Stores a new ITR value based on strictly on packet size. This
- * algorithm is less sophisticated than that used in igc_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
- */
-static void igc_update_ring_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- int new_val = q_vector->itr_val;
- int avg_wire_size = 0;
- unsigned int packets;
-
- /* For non-gigabit speeds, just fix the interrupt rate at 4000
- * ints/sec - ITR timer value of 120 ticks.
- */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- new_val = IGC_4K_ITR;
- goto set_itr_val;
- default:
- break;
- }
-
- packets = q_vector->rx.total_packets;
- if (packets)
- avg_wire_size = q_vector->rx.total_bytes / packets;
-
- packets = q_vector->tx.total_packets;
- if (packets)
- avg_wire_size = max_t(u32, avg_wire_size,
- q_vector->tx.total_bytes / packets);
-
- /* if avg_wire_size isn't set no work was done */
- if (!avg_wire_size)
- goto clear_counts;
-
- /* Add 24 bytes to size to account for CRC, preamble, and gap */
- avg_wire_size += 24;
-
- /* Don't starve jumbo frames */
- avg_wire_size = min(avg_wire_size, 3000);
-
- /* Give a little boost to mid-size frames */
- if (avg_wire_size > 300 && avg_wire_size < 1200)
- new_val = avg_wire_size / 3;
- else
- new_val = avg_wire_size / 2;
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (new_val < IGC_20K_ITR &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- new_val = IGC_20K_ITR;
-
-set_itr_val:
- if (new_val != q_vector->itr_val) {
- q_vector->itr_val = new_val;
- q_vector->set_itr = 1;
- }
-clear_counts:
- q_vector->rx.total_bytes = 0;
- q_vector->rx.total_packets = 0;
- q_vector->tx.total_bytes = 0;
- q_vector->tx.total_packets = 0;
-}
-
-/**
- * igc_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
- */
-static void igc_update_itr(struct igc_q_vector *q_vector,
- struct igc_ring_container *ring_container)
-{
- unsigned int packets = ring_container->total_packets;
- unsigned int bytes = ring_container->total_bytes;
- u8 itrval = ring_container->itr;
-
- /* no packets, exit with status unchanged */
- if (packets == 0)
- return;
-
- switch (itrval) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- itrval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 10) || ((bytes / packets) > 1200))
- itrval = bulk_latency;
- else if ((packets > 35))
- itrval = lowest_latency;
- } else if (bytes / packets > 2000) {
- itrval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- itrval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- itrval = low_latency;
- } else if (bytes < 1500) {
- itrval = low_latency;
- }
- break;
- }
-
- /* clear work counters since we have the values we need */
- ring_container->total_bytes = 0;
- ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itrval;
-}
-
-/**
* igc_intr_msi - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
@@ -3513,424 +4252,6 @@ static irqreturn_t igc_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static void igc_set_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- u32 new_itr = q_vector->itr_val;
- u8 current_itr = 0;
-
- /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- current_itr = 0;
- new_itr = IGC_4K_ITR;
- goto set_itr_now;
- default:
- break;
- }
-
- igc_update_itr(q_vector, &q_vector->tx);
- igc_update_itr(q_vector, &q_vector->rx);
-
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (current_itr == lowest_latency &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- current_itr = low_latency;
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
- break;
- case low_latency:
- new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
- break;
- case bulk_latency:
- new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
- break;
- default:
- break;
- }
-
-set_itr_now:
- if (new_itr != q_vector->itr_val) {
- /* this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing
- */
- new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) : new_itr;
- /* Don't write the value here; it resets the adapter's
- * internal timer, and causes us to delay far longer than
- * we should between interrupts. Instead, we write the ITR
- * value at the beginning of the next interrupt so the timing
- * ends up being correct.
- */
- q_vector->itr_val = new_itr;
- q_vector->set_itr = 1;
- }
-}
-
-static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
-
- if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
- (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
- if (adapter->num_q_vectors == 1)
- igc_set_itr(q_vector);
- else
- igc_update_ring_itr(q_vector);
- }
-
- if (!test_bit(__IGC_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
- wr32(IGC_EIMS, q_vector->eims_value);
- else
- igc_irq_enable(adapter);
- }
-}
-
-/**
- * igc_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
- */
-static int igc_poll(struct napi_struct *napi, int budget)
-{
- struct igc_q_vector *q_vector = container_of(napi,
- struct igc_q_vector,
- napi);
- bool clean_complete = true;
- int work_done = 0;
-
- if (q_vector->tx.ring)
- clean_complete = igc_clean_tx_irq(q_vector, budget);
-
- if (q_vector->rx.ring) {
- int cleaned = igc_clean_rx_irq(q_vector, budget);
-
- work_done += cleaned;
- if (cleaned >= budget)
- clean_complete = false;
- }
-
- /* If all work not completed, return budget and keep polling */
- if (!clean_complete)
- return budget;
-
- /* Exit the polling mode, but don't re-enable interrupts if stack might
- * poll us due to busy-polling
- */
- if (likely(napi_complete_done(napi, work_done)))
- igc_ring_irq_enable(q_vector);
-
- return min(work_done, budget - 1);
-}
-
-/**
- * igc_set_interrupt_capability - set MSI or MSI-X if supported
- * @adapter: Pointer to adapter structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- */
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix)
-{
- int numvecs, i;
- int err;
-
- if (!msix)
- goto msi_only;
- adapter->flags |= IGC_FLAG_HAS_MSIX;
-
- /* Number of supported queues. */
- adapter->num_rx_queues = adapter->rss_queues;
-
- adapter->num_tx_queues = adapter->rss_queues;
-
- /* start with one vector for every Rx queue */
- numvecs = adapter->num_rx_queues;
-
- /* if Tx handler is separate add 1 for every Tx queue */
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
- numvecs += adapter->num_tx_queues;
-
- /* store the number of vectors reserved for queues */
- adapter->num_q_vectors = numvecs;
-
- /* add 1 vector for link status interrupts */
- numvecs++;
-
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- return;
-
- /* populate entry values */
- for (i = 0; i < numvecs; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix_range(adapter->pdev,
- adapter->msix_entries,
- numvecs,
- numvecs);
- if (err > 0)
- return;
-
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
-
- igc_reset_interrupt_capability(adapter);
-
-msi_only:
- adapter->flags &= ~IGC_FLAG_HAS_MSIX;
-
- adapter->rss_queues = 1;
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_q_vectors = 1;
- if (!pci_enable_msi(adapter->pdev))
- adapter->flags |= IGC_FLAG_HAS_MSI;
-}
-
-static void igc_add_ring(struct igc_ring *ring,
- struct igc_ring_container *head)
-{
- head->ring = ring;
- head->count++;
-}
-
-/**
- * igc_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int igc_alloc_q_vector(struct igc_adapter *adapter,
- unsigned int v_count, unsigned int v_idx,
- unsigned int txr_count, unsigned int txr_idx,
- unsigned int rxr_count, unsigned int rxr_idx)
-{
- struct igc_q_vector *q_vector;
- struct igc_ring *ring;
- int ring_count;
-
- /* igc only supports 1 Tx and/or 1 Rx queue per vector */
- if (txr_count > 1 || rxr_count > 1)
- return -ENOMEM;
-
- ring_count = txr_count + rxr_count;
-
- /* allocate q_vector and rings */
- q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
- q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
- GFP_KERNEL);
- else
- memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
- if (!q_vector)
- return -ENOMEM;
-
- /* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
-
- /* tie q_vector and adapter together */
- adapter->q_vector[v_idx] = q_vector;
- q_vector->adapter = adapter;
-
- /* initialize work limits */
- q_vector->tx.work_limit = adapter->tx_work_limit;
-
- /* initialize ITR configuration */
- q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
- q_vector->itr_val = IGC_START_ITR;
-
- /* initialize pointer to rings */
- ring = q_vector->ring;
-
- /* initialize ITR */
- if (rxr_count) {
- /* rx or rx/tx vector */
- if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
- q_vector->itr_val = adapter->rx_itr_setting;
- } else {
- /* tx only vector */
- if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
- q_vector->itr_val = adapter->tx_itr_setting;
- }
-
- if (txr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Tx values */
- igc_add_ring(ring, &q_vector->tx);
-
- /* apply Tx specific ring traits */
- ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
-
- /* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
-
- /* push pointer to next ring */
- ring++;
- }
-
- if (rxr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Rx values */
- igc_add_ring(ring, &q_vector->rx);
-
- /* apply Rx specific ring traits */
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
-
- /* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
- }
-
- return 0;
-}
-
-/**
- * igc_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int igc_alloc_q_vectors(struct igc_adapter *adapter)
-{
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
- int q_vectors = adapter->num_q_vectors;
- int err;
-
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
- for (; rxr_remaining; v_idx++) {
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining--;
- rxr_idx++;
- }
- }
-
- for (; v_idx < q_vectors; v_idx++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
-
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- tqpv, txr_idx, rqpv, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining -= rqpv;
- txr_remaining -= tqpv;
- rxr_idx++;
- txr_idx++;
- }
-
- return 0;
-
-err_out:
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--)
- igc_free_q_vector(adapter, v_idx);
-
- return -ENOMEM;
-}
-
-/**
- * igc_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- */
-static void igc_cache_ring_register(struct igc_adapter *adapter)
-{
- int i = 0, j = 0;
-
- switch (adapter->hw.mac.type) {
- case igc_i225:
- /* Fall through */
- default:
- for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j]->reg_idx = j;
- break;
- }
-}
-
-/**
- * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
- * @adapter: Pointer to adapter structure
- *
- * This function initializes the interrupts and allocates all of the queues.
- */
-static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
-{
- struct pci_dev *pdev = adapter->pdev;
- int err = 0;
-
- igc_set_interrupt_capability(adapter, msix);
-
- err = igc_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
- goto err_alloc_q_vectors;
- }
-
- igc_cache_ring_register(adapter);
-
- return 0;
-
-err_alloc_q_vectors:
- igc_reset_interrupt_capability(adapter);
- return err;
-}
-
static void igc_free_irq(struct igc_adapter *adapter)
{
if (adapter->msix_entries) {
@@ -3947,62 +4268,6 @@ static void igc_free_irq(struct igc_adapter *adapter)
}
/**
- * igc_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- */
-static void igc_irq_disable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 regval = rd32(IGC_EIAM);
-
- wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
- wr32(IGC_EIMC, adapter->eims_enable_mask);
- regval = rd32(IGC_EIAC);
- wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
- }
-
- wr32(IGC_IAM, 0);
- wr32(IGC_IMC, ~0);
- wrfl();
-
- if (adapter->msix_entries) {
- int vector = 0, i;
-
- synchronize_irq(adapter->msix_entries[vector++].vector);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- synchronize_irq(adapter->msix_entries[vector++].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * igc_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- */
-static void igc_irq_enable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
- u32 regval = rd32(IGC_EIAC);
-
- wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
- regval = rd32(IGC_EIAM);
- wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
- wr32(IGC_EIMS, adapter->eims_enable_mask);
- wr32(IGC_IMS, ims);
- } else {
- wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- }
-}
-
-/**
* igc_request_irq - initialize interrupts
* @adapter: Pointer to adapter structure
*
@@ -4056,25 +4321,10 @@ request_done:
return err;
}
-static void igc_write_itr(struct igc_q_vector *q_vector)
-{
- u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
-
- if (!q_vector->set_itr)
- return;
-
- if (!itr_val)
- itr_val = IGC_ITR_VAL_MASK;
-
- itr_val |= IGC_EITR_CNT_IGNR;
-
- writel(itr_val, q_vector->itr_register);
- q_vector->set_itr = 0;
-}
-
/**
- * igc_open - Called when a network interface is made active
+ * __igc_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: boolean indicating if the device is resuming
*
* Returns 0 on success, negative value on failure
*
@@ -4164,8 +4414,9 @@ static int igc_open(struct net_device *netdev)
}
/**
- * igc_close - Disables a network interface
+ * __igc_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: boolean indicating the device is suspending
*
* Returns 0, this is not allowed to fail
*
@@ -4199,6 +4450,24 @@ static int igc_close(struct net_device *netdev)
return 0;
}
+/**
+ * igc_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ */
+static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return igc_ptp_get_ts_config(netdev, ifr);
+ case SIOCSHWTSTAMP:
+ return igc_ptp_set_ts_config(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
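
From userspace these two commands are reached through the standard SIOC[GS]HWTSTAMP interface. A minimal sketch that enables Tx/Rx timestamping on an interface (the interface name is a placeholder; error handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;        /* timestamp all Tx */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* timestamp all Rx */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder name */
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
            perror("SIOCSHWTSTAMP");

        close(fd);
        return 0;
    }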
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4210,6 +4479,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
+ .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -4345,32 +4615,26 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err;
+ int err, pci_using_dac;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(64));
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "igc: Wrong DMA config\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- igc_driver_name);
+ err = pci_request_mem_regions(pdev, igc_driver_name);
if (err)
goto err_pci_reg;
@@ -4433,6 +4697,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list*/
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
@@ -4446,6 +4713,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= netdev->features;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -4512,6 +4782,9 @@ static int igc_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* do hw tstamp init after resetting */
+ igc_ptp_init(adapter);
+
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
@@ -4532,8 +4805,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -4554,6 +4826,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ igc_ptp_stop(adapter);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -4580,105 +4854,216 @@ static void igc_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_driver igc_driver = {
- .name = igc_driver_name,
- .id_table = igc_pci_tbl,
- .probe = igc_probe,
- .remove = igc_remove,
-};
-
-void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
- const u32 max_rss_queues)
+static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
- /* Determine if we need to pair queues. */
- /* If rss_queues > half of max_rss_queues, pair the queues in
- * order to conserve interrupts due to limited supply.
- */
- if (adapter->rss_queues > (max_rss_queues / 2))
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl, rctl, status;
+ bool wake;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __igc_close(netdev, true);
+
+ igc_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
+
+ status = rd32(IGC_STATUS);
+ if (status & IGC_STATUS_LU)
+ wufc &= ~IGC_WUFC_LNKC;
+
+ if (wufc) {
+ igc_setup_rctl(adapter);
+ igc_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & IGC_WUFC_MC) {
+ rctl = rd32(IGC_RCTL);
+ rctl |= IGC_RCTL_MPE;
+ wr32(IGC_RCTL, rctl);
+ }
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_ADVD3WUC;
+ wr32(IGC_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+ igc_disable_pcie_master(hw);
+
+ wr32(IGC_WUC, IGC_WUC_PME_EN);
+ wr32(IGC_WUFC, wufc);
+ } else {
+ wr32(IGC_WUC, 0);
+ wr32(IGC_WUFC, 0);
+ }
+
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
+ igc_power_down_link(adapter);
else
- adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
-}
+ igc_power_up_link(adapter);
-unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
-{
- unsigned int max_rss_queues;
+ if (enable_wake)
+ *enable_wake = wake;
- /* Determine the maximum number of RSS queues supported. */
- max_rss_queues = IGC_MAX_RX_QUEUES;
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ igc_release_hw_control(adapter);
- return max_rss_queues;
+ pci_disable_device(pdev);
+
+ return 0;
}
-static void igc_init_queue_configuration(struct igc_adapter *adapter)
+#ifdef CONFIG_PM
+static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
- u32 max_rss_queues;
-
- max_rss_queues = igc_get_max_rss_queues(adapter);
- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
-
- igc_set_flag_queue_pairs(adapter, max_rss_queues);
+ return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
-/**
- * igc_sw_init - Initialize general software structures (struct igc_adapter)
- * @adapter: board private structure to initialize
- *
- * igc_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- */
-static int igc_sw_init(struct igc_adapter *adapter)
+static void igc_deliver_wake_packet(struct net_device *netdev)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
+ struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ u32 wupl;
- int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+ wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+ /* WUPM stores only the first 128 bytes of the wake packet.
+ * Read the packet only if we have the whole thing.
+ */
+ if (wupl == 0 || wupl > IGC_WUPM_BYTES)
+ return;
- /* set default ring sizes */
- adapter->tx_ring_count = IGC_DEFAULT_TXD;
- adapter->rx_ring_count = IGC_DEFAULT_RXD;
+ skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
+ if (!skb)
+ return;
- /* set default ITR values */
- adapter->rx_itr_setting = IGC_DEFAULT_ITR;
- adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+ skb_put(skb, wupl);
- /* set default work limits */
- adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+ /* Ensure reads are 32-bit aligned */
+ wupl = roundup(wupl, 4);
- /* adjust max frame to be at least the size of a standard frame */
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
- spin_lock_init(&adapter->nfc_lock);
- spin_lock_init(&adapter->stats64_lock);
- /* Assume MSI-X interrupts, will be checked during IRQ allocation */
- adapter->flags |= IGC_FLAG_HAS_MSIX;
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+}
- adapter->mac_table = kzalloc(size, GFP_ATOMIC);
- if (!adapter->mac_table)
- return -ENOMEM;
+static int __maybe_unused igc_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 err, val;
- igc_init_queue_configuration(adapter);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "igc: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- /* This call may decrease the number of queues */
if (igc_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
- /* Explicitly disable IRQ since the NIC can be in any state. */
- igc_irq_disable(adapter);
+ igc_reset(adapter);
- set_bit(__IGC_DOWN, &adapter->state);
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
- return 0;
+ val = rd32(IGC_WUS);
+ if (val & WAKE_PKT_WUS)
+ igc_deliver_wake_packet(netdev);
+
+ wr32(IGC_WUS, ~0);
+
+ rtnl_lock();
+ if (!err && netif_running(netdev))
+ err = __igc_open(netdev, true);
+
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __maybe_unused igc_runtime_resume(struct device *dev)
+{
+ return igc_resume(dev);
+}
+
+static int __maybe_unused igc_suspend(struct device *dev)
+{
+ return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
+static int __maybe_unused igc_runtime_idle(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (!igc_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+#endif /* CONFIG_PM */
+
+static void igc_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __igc_shutdown(pdev, &wake, 0);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops igc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
+ SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle)
+};
+#endif
+
+static struct pci_driver igc_driver = {
+ .name = igc_driver_name,
+ .id_table = igc_pci_tbl,
+ .probe = igc_probe,
+ .remove = igc_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &igc_pm_ops,
+#endif
+ .shutdown = igc_shutdown,
+};
+
/**
* igc_reinit_queues - return error
* @adapter: pointer to adapter structure
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index f4b05af0dd2f..8e1799508edc 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw)
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
+ u32 phpm = 0, timeout = 10000;
s32 ret_val;
u32 ctrl;
@@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
if (ret_val)
goto out;
+ phpm = rd32(IGC_I225_PHPM);
+
ctrl = rd32(IGC_CTRL);
wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
wrfl();
@@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
wr32(IGC_CTRL, ctrl);
wrfl();
- usleep_range(1500, 2000);
+ /* SW should guarantee 100us for the completion of the PHY reset */
+ usleep_range(100, 150);
+ do {
+ phpm = rd32(IGC_I225_PHPM);
+ timeout--;
+ udelay(1);
+ } while (!(phpm & IGC_PHY_RST_COMP) && timeout);
+
+ if (!timeout)
+ hw_dbg("Timeout is expired after a phy reset\n");
+
+ usleep_range(100, 150);
phy->ops.release(hw);
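For a sense of the wait budget in the loop above: the first usleep_range() contributes roughly 100-150 us, and the polling loop adds up to 10000 iterations of udelay(1), so the driver waits on the order of 10 ms in total before logging that the PHY reset timed out.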
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
new file mode 100644
index 000000000000..693506587198
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGC_PTP_TX_TIMEOUT (HZ * 15)
+
+/* SYSTIM read access for I225 */
+static void igc_ptp_read_i225(struct igc_adapter *adapter,
+ struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 sec, nsec;
+
+ /* The timestamp latches on lowest register read. For I210/I211, the
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+ rd32(IGC_SYSTIMR);
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void igc_ptp_write_i225(struct igc_adapter *adapter,
+ const struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Writing the SYSTIMR register is not necessary as it only
+ * provides sub-nanosecond resolution.
+ */
+ wr32(IGC_SYSTIML, ts->tv_nsec);
+ wr32(IGC_SYSTIMH, ts->tv_sec);
+}
+
+static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 inca;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+ rate = scaled_ppm;
+ rate <<= 14;
+ rate = div_u64(rate, 78125);
+
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+ wr32(IGC_TIMINCA, inca);
+
+ return 0;
+}
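To make the fixed-point conversion above concrete: scaled_ppm carries a 16-bit fractional part, so a request of exactly 1 ppm arrives as scaled_ppm = 65536, and (65536 << 14) / 78125 = 1073741824 / 78125 = 13743 (integer division). Each whole ppm of requested frequency offset therefore moves the TIMINCA increment field by roughly 13743 units.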
+
+static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_read_i225(igc, &now);
+ now = timespec64_add(now, then);
+ igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(IGC_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML);
+ ts->tv_sec = rd32(IGC_SYSTIMH);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_write_i225(igc, ts);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ **/
+static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve the first timestamp from the
+ * first buffer of an incoming frame. The value is stored in little
+ * endian format starting on byte 0. There's a second timestamp
+ * starting on byte 8.
+ **/
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ __le64 *regval = (__le64 *)va;
+ int adjust = 0;
+
+ /* The timestamp is recorded in little endian format.
+ * DWORD: | 0 | 1 | 2 | 3
+ * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+ */
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[0]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == igc_i225) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_RX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_RX_LATENCY_2500;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+}
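One aside on the layout above: a second (Timer1) stamp sits at byte 8 of the same buffer. This driver only consumes the Timer0 stamp, but the Timer1 value could hypothetically be pulled out the same way:

	u64 timer1 = le64_to_cpu(regval[1]);	/* hypothetical: Timer1 stamp, bytes 8-15 */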
+
+/**
+ * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /* If this bit is set, then the RX registers contain the time
+ * stamp. No other packet will be time stamped until we read
+ * these registers, so read the registers to make them
+ * available again. Because only one packet can be time
+ * stamped at a time, we know that the register values must
+ * belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared
+ * tx_flags that we can turn into a skb_shared_hwtstamps.
+ */
+ if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID))
+ return;
+
+ regval = rd32(IGC_RXSTMPL);
+ regval |= (u64)rd32(IGC_RXSTMPH) << 32;
+
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer so the watchdog can check for the
+ * error case of a timestamp latched for a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
+}
+
+/**
+ * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue
+ * @adapter: Pointer to the adapter structure
+ * @rx_ring: Pointer to RX queue
+ * @timer: Index for timer
+ *
+ * This function enables RX timestamping for a queue, and selects
+ * which 1588 timer will provide the timestamp.
+ */
+static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter,
+ struct igc_ring *rx_ring, u8 timer)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int reg_idx = rx_ring->reg_idx;
+ u32 srrctl = rd32(IGC_SRRCTL(reg_idx));
+
+ srrctl |= IGC_SRRCTL_TIMESTAMP;
+ srrctl |= IGC_SRRCTL_TIMER1SEL(timer);
+ srrctl |= IGC_SRRCTL_TIMER0SEL(timer);
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+}
+
+static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
+ u8 timer)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer);
+ }
+}
+
+/**
+ * igc_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported; in particular, the
+ * event type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ */
+static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
+ struct hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ /* fall through */
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as long
+ * as one Rx filter was configured.
+ */
+ if (tsync_rx_ctl) {
+ tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
+
+ if (hw->mac.type == igc_i225) {
+ regval = rd32(IGC_RXPBS);
+ regval |= IGC_RXPBS_CFG_TS_EN;
+ wr32(IGC_RXPBS, regval);
+
+ /* FIXME: For now, only support retrieving RX
+ * timestamps from timer 0
+ */
+ igc_ptp_enable_tstamp_all_rxqueues(adapter, 0);
+ }
+ }
+
+ if (tsync_tx_ctl) {
+ tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG;
+ }
+
+ /* enable/disable TX */
+ regval = rd32(IGC_TSYNCTXCTL);
+ regval &= ~IGC_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32(IGC_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(IGC_TSYNCRXCTL);
+ regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(IGC_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ wr32(IGC_ETQF(3),
+ (IGC_ETQF_FILTER_ENABLE | /* enable filter */
+ IGC_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(IGC_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | IGC_FTQF_VF_BP /* VF not compared */
+ | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */
+ | IGC_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ wr32(IGC_IMIR(3), htons(PTP_EV_PORT));
+ wr32(IGC_IMIREXT(3),
+ (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP));
+ wr32(IGC_FTQF(3), ftqf);
+ } else {
+ wr32(IGC_FTQF(3), IGC_FTQF_MASK);
+ }
+ wrfl();
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(IGC_TXSTMPL);
+ regval = rd32(IGC_TXSTMPH);
+ regval = rd32(IGC_RXSTMPL);
+ regval = rd32(IGC_RXSTMPH);
+
+ return 0;
+}
+
+void igc_ptp_tx_hang(struct igc_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT);
+ struct igc_hw *hw = &adapter->hw;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it never will, so we can unlock the
+ * timestamp bit now.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
+ * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we
+ * allow only one such packet into the queue.
+ */
+static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ regval = rd32(IGC_TXSTMPL);
+ regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+
+ /* Clear the lock early before calling skb_tstamp_tx so that
+ * applications are not woken up before the lock bit is clear. We use
+ * a copy of the skb pointer to ensure other threads can't change it
+ * while we're notifying the stack.
+ */
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * igc_ptp_tx_work - check for a pending Tx hardware timestamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igc_ptp_tx_work(struct work_struct *work)
+{
+ struct igc_adapter *adapter = container_of(work, struct igc_adapter,
+ ptp_tx_work);
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ return;
+ }
+
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
+ igc_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+/**
+ * igc_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ **/
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igc_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
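As a usage sketch (not from this patch): userspace reaches igc_ptp_set_ts_config() through the standard SIOCSHWTSTAMP ioctl. A minimal example, with error handling elided and the helper name enable_hwtstamp() chosen for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable Tx and Rx hardware timestamping on the interface @ifname.
 * @sock is any open socket (e.g. AF_INET/SOCK_DGRAM); returns the
 * ioctl() result: 0 on success, -1 on failure.
 */
static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}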
+
+/**
+ * igc_ptp_get_ts_config - get hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igc_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
+void igc_ptp_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
+ adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
+ adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
+ adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
+
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ igc_ptp_reset(adapter);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ } else if (adapter->ptp_clock) {
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGC_PTP_ENABLED;
+ }
+}
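Once ptp_clock_register() succeeds, the clock appears as a /dev/ptpN character device. A minimal userspace sketch (not from this patch) reading it with the standard dynamic-clock idiom; /dev/ptp0 is an assumed path, since the index depends on probe order:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device path */
	struct timespec ts;

	if (fd < 0)
		return 1;
	clock_gettime(FD_TO_CLOCKID(fd), &ts);
	printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}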
+
+/**
+ * igc_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
+ *
+ * This function stops the overflow check work and PTP Tx timestamp work, and
+ * will prepare the device for OS suspend.
+ */
+void igc_ptp_suspend(struct igc_adapter *adapter)
+{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
+}
+
+/**
+ * igc_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igc_ptp_stop(struct igc_adapter *adapter)
+{
+ igc_ptp_suspend(adapter);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
+ }
+}
+
+/**
+ * igc_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igc_ptp_reset(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* reset the tstamp_config */
+ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ wr32(IGC_TSAUXC, 0x0);
+ wr32(IGC_TSSDP, 0x0);
+ wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS);
+ wr32(IGC_IMS, IGC_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ goto out;
+ }
+
+ /* Re-initialize the timer. */
+ if (hw->mac.type == igc_i225) {
+ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
+
+ igc_ptp_write_i225(adapter, &ts64);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+out:
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 50d7c04dccf5..c9029b549b90 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
@@ -209,12 +210,48 @@
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+/* Time sync registers */
+#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
+#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */
+#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+
+#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+
+#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+
+#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+
+/* System Time Registers */
+#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
+#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
+#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+
+#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
+/* Wake Up registers */
+#define IGC_WUC 0x05800 /* Wakeup Control - RW */
+#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
+#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+
+/* Wake Up packet memory */
+#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 3d8c051dd327..b64e91ea3465 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -70,7 +70,7 @@ static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
@@ -1538,7 +1538,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 171cdc552961..5b1cf49df3d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -166,7 +166,9 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+ /* TX Queue number below is wrong, but ixgbe does not use it */
+ adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
+ UINT_MAX);
e_dev_info("tx_timeout called\n");
} else {
e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 82a30b597cf9..718931d951bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
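A worked example of the VF queue mapping above: with num_rx_queues_per_pool = 4 (an illustrative value), a filter directing traffic to ring 1 of VF 2 maps to absolute queue (2 - 1) * 4 + 1 = 5.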
@@ -6158,7 +6175,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbe_tx_timeout(struct net_device *netdev)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b43be9f14105..74b540ebb3dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -277,7 +277,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -304,7 +304,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 076f2da36f27..4622c4ea2e46 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -250,7 +250,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 25aa400e2e3c..2e4975572e9f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2337,7 +2337,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
}
static void
-jme_tx_timeout(struct net_device *netdev)
+jme_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct jme_adapter *jme = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index ae195f8adff5..f1d84921e42b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -917,7 +917,7 @@ static void korina_restart_task(struct work_struct *work)
enable_irq(lp->rx_irq);
}
-static void korina_tx_timeout(struct net_device *dev)
+static void korina_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct korina_private *lp = netdev_priv(dev);
@@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->eth_regs = ioremap(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->rx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->tx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6e73ffe6f928..2d0c52f7106b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -510,13 +510,6 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
}
static int
-ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- /* TODO: mii-toll reports "No MII transceiver present!." ?!*/
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
@@ -594,7 +587,7 @@ err_hw:
}
static void
-ltq_etop_tx_timeout(struct net_device *dev)
+ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int err;
@@ -616,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_stop = ltq_etop_stop,
.ndo_start_xmit = ltq_etop_tx,
.ndo_change_mtu = ltq_etop_change_mtu,
- .ndo_do_ioctl = ltq_etop_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
@@ -649,7 +642,7 @@ ltq_etop_probe(struct platform_device *pdev)
goto err_out;
}
- ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+ ltq_etop_membase = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 65a093216dac..3c8125cbc84d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2590,7 +2590,7 @@ static void tx_timeout_task(struct work_struct *ugly)
}
}
-static void mv643xx_eth_tx_timeout(struct net_device *dev)
+static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71a872d46bc4..2dfbfdff45a8 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2081,7 +2081,11 @@ static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp)
{
- u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+ unsigned int len;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
@@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
if (err) {
ret = MVNETA_XDP_DROPPED;
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
} else {
ret = MVNETA_XDP_REDIR;
}
@@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_DROP:
__page_pool_put_page(rxq->page_pool,
virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ len, true);
ret = MVNETA_XDP_DROPPED;
break;
}
@@ -3071,7 +3072,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
- .nid = cpu_to_node(0),
+ .nid = NUMA_NO_NODE,
.dev = pp->dev->dev.parent,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = pp->rx_offset_correction,
@@ -4225,6 +4226,12 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ if (pp->bm_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Hardware Buffer Management not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
need_update = !!pp->xdp_prog != !!prog;
if (running && need_update)
mvneta_stop(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 14e372cda7f4..72133cbe55d4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1114,7 +1114,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
- return interface == PHY_INTERFACE_MODE_10GKR ||
+ return interface == PHY_INTERFACE_MODE_10GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1200,7 +1200,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
if (port->gop_id != 0)
goto invalid_conf;
mvpp22_gop_init_10gkr(port);
@@ -1649,7 +1649,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
MAC_CLK_RESET_SD_TX;
@@ -4758,7 +4758,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
/* Invalid combinations */
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
if (port->gop_id != 0)
goto empty_set;
@@ -4780,7 +4780,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (port->gop_id == 0) {
@@ -4792,6 +4792,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
}
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4802,13 +4804,23 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
+ if (port->comphy ||
+ state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (port->comphy ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
break;
default:
goto empty_set;
@@ -4817,6 +4829,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_helper_basex_speed(state);
return;
empty_set:
@@ -5233,6 +5247,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
}
+ /*
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
+ * Existing usage of 10GBASE-KR is not correct; no backplane
+ * negotiation is done, and this driver does not actually support
+ * 10GBASE-KR.
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+
if (port_node) {
comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
if (IS_ERR(comphy)) {
@@ -5411,6 +5434,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink = NULL;
}
+ /* Cycle the comphy to power it down, saving 270mW per port -
+ * don't worry about an error powering it up. When the comphy
+ * driver does this, we can remove this code.
+ */
+ if (port->comphy) {
+ err = mvpp22_comphy_init(port);
+ if (err == 0)
+ phy_power_off(port->comphy);
+ }
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index fb34fbd62088..ced514c05c97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -25,3 +25,11 @@ config NDC_DIS_DYNAMIC_CACHING
This config option disables caching of dynamic entries, such as NIX SQEs
and NPA stack pages, in NDC. Also locks down NIX SQ/CQ/RQ/RSS and
NPA Aura/Pool contexts.
+
+config OCTEONTX2_PF
+ tristate "Marvell OcteonTX2 NIC Physical Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
index e579dcd54c97..0064a69e0f72 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -3,4 +3,6 @@
# Makefile for Marvell OcteonTX2 device drivers.
#
+obj-$(CONFIG_OCTEONTX2_MBOX) += af/
obj-$(CONFIG_OCTEONTX2_AF) += af/
+obj-$(CONFIG_OCTEONTX2_PF) += nic/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 784207bae5f8..cd33c2e6ca5f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,8 +143,13 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
-#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
-#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define MAX_SCHED_WEIGHT 0xFF
+#define DFLT_RR_WEIGHT 71
+#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
+ / MAX_SCHED_WEIGHT)
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
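A worked example of the new default quantum: with DFLT_RR_WEIGHT = 71, MAX_SCHED_WEIGHT = 0xFF (255) and TXSCH_RR_QTM_MAX = 2^24 - 1 = 16777215, DFLT_RR_QTM evaluates to (71 * 16777215) / 255 = 1191182265 / 255 = 4671303, which happens to divide exactly.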
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a589748f1240..8bbc1f1d81f5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -210,7 +210,8 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -618,6 +619,11 @@ struct nix_set_mac_addr {
u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
struct nix_mark_format_cfg {
struct mbox_msghdr hdr;
u8 offset;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 8a59f7d53fbf..eb5e542424e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2546,6 +2546,23 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_get_mac_addr_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
new file mode 100644
index 000000000000..41bf00cf5b1d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 ethernet device drivers
+#
+
+obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
new file mode 100644
index 000000000000..8247d21d0432
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -0,0 +1,1410 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+
+static void otx2_nix_rq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+static void otx2_nix_sq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+void otx2_update_lmac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return;
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+}
+
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
+
+ if (!pfvf->qset.rq)
+ return 0;
+
+ otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
+ return 1;
+}
+
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
+
+ if (!pfvf->qset.sq)
+ return 0;
+
+ otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
+ return 1;
+}
+
+void otx2_get_dev_stats(struct otx2_nic *pfvf)
+{
+ struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
+
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
+ dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
+ dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
+ dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
+ dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
+ dev_stats->rx_frames = dev_stats->rx_bcast_frames +
+ dev_stats->rx_mcast_frames +
+ dev_stats->rx_ucast_frames;
+
+ dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
+ dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
+ dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
+ dev_stats->tx_frames = dev_stats->tx_bcast_frames +
+ dev_stats->tx_mcast_frames +
+ dev_stats->tx_ucast_frames;
+}
+
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_dev_stats *dev_stats;
+
+ otx2_get_dev_stats(pfvf);
+
+ dev_stats = &pfvf->hw.dev_stats;
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_drops;
+ stats->multicast = dev_stats->rx_mcast_frames;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_frames;
+ stats->tx_dropped = dev_stats->tx_drops;
+}
+
+/* Sync MAC address with RVU AF */
+static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
+{
+ struct nix_set_mac_addr *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
+ struct net_device *netdev)
+{
+ struct nix_get_mac_addr_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!msghdr) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+ rsp = (struct nix_get_mac_addr_rsp *)msghdr;
+ ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ return 0;
+}
+
+int otx2_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
+{
+ struct nix_frs_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ /* SMQ config limits maximum pkt size that can be transmitted */
+ req->update_smq = true;
+ pfvf->max_frs = mtu + OTX2_ETH_HLEN;
+ req->maxlen = pfvf->max_frs;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+ req->mcam_index = -1; /* Default or reserved index */
+ req->flowkey_cfg = rss->flowkey_cfg;
+ req->group = DEFAULT_RSS_CONTEXT_GROUP;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_rss_table(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct mbox *mbox = &pfvf->mbox;
+ struct nix_aq_enq_req *aq;
+ int idx, err;
+
+ otx2_mbox_lock(mbox);
+ /* Get memory to put this msg */
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ err = otx2_sync_mbox_msg(mbox);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+ }
+
+ aq->rss.rq = rss->ind_tbl[idx];
+
+ /* Fill AQ info */
+ aq->qidx = idx;
+ aq->ctype = NIX_AQ_CTYPE_RSS;
+ aq->op = NIX_AQ_INSTOP_INIT;
+ }
+ err = otx2_sync_mbox_msg(mbox);
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+void otx2_set_rss_key(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u64 *key = (u64 *)&rss->key[4];
+ int idx;
+
+ /* The 352-bit (44-byte) key needs to be configured as below:
+ * NIX_LF_RX_SECRETX0 = key<351:288>
+ * NIX_LF_RX_SECRETX1 = key<287:224>
+ * NIX_LF_RX_SECRETX2 = key<223:160>
+ * NIX_LF_RX_SECRETX3 = key<159:96>
+ * NIX_LF_RX_SECRETX4 = key<95:32>
+ * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
+ */
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
+ (u64)(*((u32 *)&rss->key)) << 32);
+ idx = sizeof(rss->key) / sizeof(u64);
+ while (idx > 0) {
+ idx--;
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
+ }
+}
+
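The SECRETX register layout in the comment above is easier to verify in isolation. Below is a minimal user-space model of the same packing, assuming only a little-endian 44-byte key; the regs[] array stands in for the six NIX_LF_RX_SECRETX() MMIO writes done by otx2_set_rss_key().

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSS_KEY_SIZE 44 /* 352-bit key */

/* regs[i] models the MMIO write to NIX_LF_RX_SECRETX(i) */
static void pack_rss_key(const uint8_t *key, uint64_t regs[6])
{
	uint32_t lo32;
	uint64_t qw;
	int idx;

	/* SECRETX5<63:32> = key<31:0> */
	memcpy(&lo32, key, sizeof(lo32));
	regs[5] = (uint64_t)lo32 << 32;

	/* SECRETX4 down to SECRETX0 = key<95:32> .. key<351:288> */
	for (idx = 0; idx < 5; idx++) {
		memcpy(&qw, key + 4 + idx * 8, sizeof(qw));
		regs[4 - idx] = qw;
	}
}

int main(void)
{
	uint8_t key[RSS_KEY_SIZE];
	uint64_t regs[6];
	int i;

	for (i = 0; i < RSS_KEY_SIZE; i++)
		key[i] = i; /* recognizable test pattern */

	pack_rss_key(key, regs);
	for (i = 0; i < 6; i++)
		printf("SECRETX%d = 0x%016llx\n", i,
		       (unsigned long long)regs[i]);
	return 0;
}
```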
+int otx2_rss_init(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ int idx, ret = 0;
+
+ rss->rss_size = sizeof(rss->ind_tbl);
+
+ /* Init RSS key if it is not set up already */
+ if (!rss->enable)
+ netdev_rss_key_fill(rss->key, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+
+ if (!netif_is_rxfh_configured(pfvf->netdev)) {
+ /* Default indirection table */
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] =
+ ethtool_rxfh_indir_default(idx,
+ pfvf->hw.rx_queues);
+ }
+ ret = otx2_set_rss_table(pfvf);
+ if (ret)
+ return ret;
+
+ /* Flowkey or hash config to be used for generating flow tag */
+ rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
+ NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
+ NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+
+ ret = otx2_set_flowkey_cfg(pfvf);
+ if (ret)
+ return ret;
+
+ rss->enable = true;
+ return 0;
+}
+
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+{
+ /* Configure CQE interrupt coalescing parameters.
+ *
+ * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
+ * program one less than cq_ecount_wait. cq_time_wait is in
+ * usecs; convert it to a count of 100ns units. A worked
+ * example follows this function.
+ */
+ otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
+ ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
+ ((u64)pfvf->hw.cq_qcount_wait << 32) |
+ (pfvf->hw.cq_ecount_wait - 1));
+}
+
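As a worked example of the packing above (a sketch, not driver code): with, say, cq_time_wait = 2 usecs, cq_qcount_wait = 32 and cq_ecount_wait = 16 (example values, not the driver defaults), the NIX_LF_CINTX_WAIT value is assembled like this.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t  cq_time_wait = 2;    /* usecs, example value */
	uint16_t cq_qcount_wait = 32; /* example value */
	uint16_t cq_ecount_wait = 16; /* example value */
	uint64_t val;

	/* Same packing as otx2_config_irq_coalescing(): the 100ns
	 * time count at bit 48, qcount at bit 32, ecount - 1 at bit 0.
	 */
	val = ((uint64_t)(cq_time_wait * 10) << 48) |
	      ((uint64_t)cq_qcount_wait << 32) |
	      (uint64_t)(cq_ecount_wait - 1);

	printf("NIX_LF_CINTX_WAIT <- 0x%016llx\n",
	       (unsigned long long)val);
	return 0;
}
```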
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp)
+{
+ dma_addr_t iova;
+
+ /* Check if request can be accommodated in the previously allocated page */
+ if (pool->page && ((pool->page_offset + pool->rbsize) <=
+ (PAGE_SIZE << pool->rbpage_order))) {
+ pool->pageref++;
+ goto ret;
+ }
+
+ otx2_get_page(pool);
+
+ /* Allocate a new page */
+ pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ pool->rbpage_order);
+ if (unlikely(!pool->page))
+ return -ENOMEM;
+
+ pool->page_offset = 0;
+ret:
+ iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
+ pool->rbsize, DMA_FROM_DEVICE);
+ if (!iova) {
+ if (!pool->page_offset)
+ __free_pages(pool->page, pool->rbpage_order);
+ pool->page = NULL;
+ return -ENOMEM;
+ }
+ pool->page_offset += pool->rbsize;
+ return iova;
+}
+
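otx2_alloc_rbuf() above carves fixed-size receive buffers out of one higher-order page until it is exhausted, deferring the page_ref_add() to otx2_get_page(). The sketch below models just the carving logic in user space, with malloc standing in for alloc_pages and the DMA mapping omitted; it is an illustration, not driver code.

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES 4096 /* stand-in for PAGE_SIZE << rbpage_order */

struct pool {
	char *page;
	size_t page_offset;
	size_t rbsize; /* receive buffer size */
};

/* Return the next buffer, allocating a fresh page only when the
 * current one cannot accommodate another rbsize chunk. Old pages
 * are intentionally leaked in this sketch.
 */
static char *alloc_rbuf(struct pool *pool)
{
	char *buf;

	if (!pool->page ||
	    pool->page_offset + pool->rbsize > PAGE_BYTES) {
		pool->page = malloc(PAGE_BYTES);
		if (!pool->page)
			return NULL;
		pool->page_offset = 0;
	}
	buf = pool->page + pool->page_offset;
	pool->page_offset += pool->rbsize;
	return buf;
}

int main(void)
{
	struct pool pool = { .rbsize = 1536 };
	int i;

	for (i = 0; i < 5; i++) {
		char *buf = alloc_rbuf(&pool);

		printf("buf %d at page offset %zu\n", i,
		       (size_t)(buf - pool.page));
	}
	return 0;
}
```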
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ schedule_work(&pfvf->reset_task);
+}
+
+void otx2_get_mac_from_af(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int err;
+
+ err = otx2_hw_get_mac_addr(pfvf, netdev);
+ if (err)
+ dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
+
+ /* If AF doesn't provide a valid MAC, generate a random one */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
+}
+
+static int otx2_get_link(struct otx2_nic *pfvf)
+{
+ int link = 0;
+ u16 map;
+
+ /* cgx lmac link */
+ if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
+ link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
+ }
+ /* LBK channel */
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
+ link = 12;
+
+ return link;
+}
+
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ struct nix_txschq_config *req;
+ u64 schq, parent;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->lvl = lvl;
+ req->num_regs = 1;
+
+ schq = hw->txschq_list[lvl][0];
+ /* Set topology and other configuration */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ req->reg[0] = NIX_AF_SMQX_CFG(schq);
+ req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ OTX2_MIN_MTU;
+
+ req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
+ (0x2ULL << 36);
+ req->num_regs++;
+ /* MDQ config */
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ req->reg[1] = NIX_AF_MDQX_PARENT(schq);
+ req->regval[1] = parent << 16;
+ req->num_regs++;
+ /* Set DWRR quantum */
+ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
+ req->regval[2] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+ req->regval[0] = parent << 16;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ otx2_get_link(pfvf));
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+
+ } else if (lvl == NIX_TXSCH_LVL_TL1) {
+ /* Default config for TL1.
+ * For VF this is always ignored.
+ */
+
+ /* Set DWRR quantum */
+ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+ req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+ req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+ req->regval[2] = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
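otx2_txschq_config() above wires one scheduler queue per level into a single SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1 chain (otx2_txsch_alloc() below requests exactly one schq per level). The fragment below only illustrates the parent linkage and the PARENT register packing (parent schq << 16) used at each level; the schq IDs are made up.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative level ordering, mirroring NIX_TXSCH_LVL_* usage */
enum { LVL_SMQ, LVL_TL4, LVL_TL3, LVL_TL2, LVL_TL1, LVL_CNT };

static const char * const lvl_name[LVL_CNT] = {
	"SMQ/MDQ", "TL4", "TL3", "TL2", "TL1",
};

int main(void)
{
	/* One schq per level; IDs are made-up examples */
	uint16_t schq[LVL_CNT] = { 3, 7, 2, 5, 0 };
	int lvl;

	/* Each PARENT register takes the parent schq shifted left
	 * by 16, as in otx2_txschq_config() above.
	 */
	for (lvl = LVL_SMQ; lvl < LVL_TL1; lvl++)
		printf("%s %u -> parent %s %u (PARENT regval 0x%x)\n",
		       lvl_name[lvl], schq[lvl],
		       lvl_name[lvl + 1], schq[lvl + 1],
		       schq[lvl + 1] << 16);
	return 0;
}
```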
+int otx2_txsch_alloc(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_alloc_req *req;
+ int lvl;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ req->schq[lvl] = 1;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_txschq_stop(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_free_req *free_req;
+ int lvl, schq, err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Free the transmit schedulers */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ /* Clear the txschq list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
+ pfvf->hw.txschq_list[lvl][schq] = 0;
+ }
+ return err;
+}
+
+void otx2_sqb_flush(struct otx2_nic *pfvf)
+{
+ int qidx, sqe_tail, sqe_head;
+ u64 incr, *ptr, val;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ incr = (u64)qidx << 32;
+ while (1) {
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head == sqe_tail)
+ break;
+ usleep_range(1, 3);
+ }
+ }
+}
+
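otx2_sqb_flush() above polls NIX_LF_SQ_OP_STATUS until the SQE head and tail pointers match, i.e. until the SQ has drained. The field extraction from that loop is reproduced below on a sample register value (the 6-bit fields at bits 20 and 28 are taken from the code; this is only a decode illustration).

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Sample NIX_LF_SQ_OP_STATUS value with head = 5, tail = 9 */
	uint64_t val = (5ULL << 20) | (9ULL << 28);
	int sqe_head = (val >> 20) & 0x3F;
	int sqe_tail = (val >> 28) & 0x3F;

	printf("head %d, tail %d -> %s\n", sqe_head, sqe_tail,
	       sqe_head == sqe_tail ? "drained" : "still busy");
	return 0;
}
```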
+/* RED and drop levels of CQ on packet reception.
+ * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
+ */
+#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize))
+#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize))
+
+/* RED and drop levels of AURA for packet reception.
+ * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
+ * E.g.: for an RQ of length 1K with pass/drop levels of 204/230,
+ * RED accepts pkts if free pointers are > 102 and <= 205, and
+ * drops pkts if free pointers are < 102. A worked example follows
+ * the SEND_CQ_SKID definition below.
+ */
+#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
+#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
+
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
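The worked example promised above: the sketch below evaluates these thresholds for a 1K-entry RQ with the 600-CQE skid that otx2_setup_dev_hw_settings() (in otx2_common.h below) applies on 96xx A0 silicon. The macro bodies are copied verbatim.

```c
#include <stdio.h>

#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize))
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100))
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100))

int main(void)
{
	int skid = 600, qsize = 1024;

	/* CQ levels measure emptiness: 0 = full, 255 = empty */
	printf("CQ pass = %d, drop = %d\n",
	       RQ_PASS_LVL_CQ(skid, qsize),
	       RQ_DROP_LVL_CQ(skid, qsize));
	/* Aura levels measure fullness: 0 = empty, 255 = full */
	printf("Aura pass = %d, drop = %d\n",
	       RQ_PASS_LVL_AURA, RQ_DROP_LVL_AURA);
	return 0;
}
```

This prints CQ pass/drop of 154/150 and aura pass/drop of 12/2, i.e. RED kicks in when the aura is about 95% full.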
+static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->rq.cq = qidx;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 1;
+ aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
+ aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
+ aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
+ aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
+ aq->rq.qint_idx = 0;
+ aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
+ aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
+ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
+ aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[sqb_aura];
+ sq = &qset->sq[qidx];
+ sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
+ sq->sqe_cnt = qset->sqe_cnt;
+
+ err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+
+ sq->sqe_base = sq->sqe->base;
+ sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
+ if (!sq->sg)
+ return -ENOMEM;
+
+ sq->head = 0;
+ sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
+ sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
+ /* Set SQE threshold to 10% of total SQEs */
+ sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
+ sq->aura_id = sqb_aura;
+ sq->aura_fc_addr = pool->fc_addr->base;
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
+
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow; see the worked
+ * example after this function.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
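The cq_limit computed above expresses the SEND_CQ_SKID as a fraction of the CQ in 1/256 units. A quick sanity check, assuming Q_COUNT(Q_SIZE_4K) is 4096 (the default SQE count per otx2_ethtool.c below):

```c
#include <stdio.h>

#define SEND_CQ_SKID 2000

int main(void)
{
	int sqe_cnt = 4096; /* assumed Q_COUNT(Q_SIZE_4K) default */
	int cq_limit = (SEND_CQ_SKID * 256) / sqe_cnt;

	/* 125/256 of a 4096-entry CQ corresponds to the 2000-CQE skid */
	printf("cq_limit = %d (~%d CQEs held back)\n",
	       cq_limit, cq_limit * sqe_cnt / 256);
	return 0;
}
```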
+static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+ struct otx2_cq_queue *cq;
+ int err, pool_id;
+
+ cq = &qset->cq[qidx];
+ cq->cq_idx = qidx;
+ if (qidx < pfvf->hw.rx_queues) {
+ cq->cq_type = CQ_RX;
+ cq->cint_idx = qidx;
+ cq->cqe_cnt = qset->rqe_cnt;
+ } else {
+ cq->cq_type = CQ_TX;
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ }
+ cq->cqe_size = pfvf->qset.xqe_size;
+
+ /* Allocate memory for CQEs */
+ err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
+ if (err)
+ return err;
+
+ /* Save CQE CPU base for faster reference */
+ cq->cqe_base = cq->cqe->base;
+ /* In the case where all RQ auras point to a single pool,
+ * all CQ receive buffer pools also point to that same pool.
+ */
+ pool_id = ((cq->cq_type == CQ_RX) &&
+ (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
+ cq->rbpool = &qset->pool[pool_id];
+ cq->refill_task_sched = false;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->cq.ena = 1;
+ aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
+ aq->cq.caching = 1;
+ aq->cq.base = cq->cqe->iova;
+ aq->cq.cint_idx = cq->cint_idx;
+ aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
+ aq->cq.qint_idx = 0;
+ aq->cq.avg_level = 255;
+
+ if (qidx < pfvf->hw.rx_queues) {
+ aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
+ aq->cq.drop_ena = 1;
+ }
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static void otx2_pool_refill_task(struct work_struct *work)
+{
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *rbpool;
+ struct refill_work *wrk;
+ int qidx, free_ptrs = 0;
+ struct otx2_nic *pfvf;
+ s64 bufptr;
+
+ wrk = container_of(work, struct refill_work, pool_refill_work.work);
+ pfvf = wrk->pf;
+ qidx = wrk - pfvf->refill_wrk;
+ cq = &pfvf->qset.cq[qidx];
+ rbpool = cq->rbpool;
+ free_ptrs = cq->pool_ptrs;
+
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ if (bufptr <= 0) {
+ /* Schedule the WQ again if we failed to free at least half
+ * of the pointers, else re-enable NAPI for this RQ.
+ */
+ if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
+ struct delayed_work *dwork;
+
+ dwork = &wrk->pool_refill_work;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ } else {
+ cq->refill_task_sched = false;
+ }
+ return;
+ }
+ otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+ cq->refill_task_sched = false;
+}
+
+int otx2_config_nix_queues(struct otx2_nic *pfvf)
+{
+ int qidx, err;
+
+ /* Initialize RX queues */
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+
+ err = otx2_rq_init(pfvf, qidx, lpb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize TX queues */
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+
+ err = otx2_sq_init(pfvf, qidx, sqb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize completion queues */
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ err = otx2_cq_init(pfvf, qidx);
+ if (err)
+ return err;
+ }
+
+ /* Initialize work queue for receive buffer refill */
+ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
+ sizeof(struct refill_work), GFP_KERNEL);
+ if (!pfvf->refill_wrk)
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ pfvf->refill_wrk[qidx].pf = pfvf;
+ INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
+ otx2_pool_refill_task);
+ }
+ return 0;
+}
+
+int otx2_config_nix(struct otx2_nic *pfvf)
+{
+ struct nix_lf_alloc_req *nixlf;
+ struct nix_lf_alloc_rsp *rsp;
+ int err;
+
+ pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+
+ /* Get memory to put this msg */
+ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
+ if (!nixlf)
+ return -ENOMEM;
+
+ /* Set RQ/SQ/CQ counts */
+ nixlf->rq_cnt = pfvf->hw.rx_queues;
+ nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->xqe_sz = NIX_XQESZ_W16;
+ /* We don't know the absolute NPA LF idx attached.
+ * AF will replace 'RVU_DEFAULT_PF_FUNC' with
+ * NPA LF attached to this RVU PF/VF.
+ */
+ nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
+ /* Disable alignment pad, enable L2 length check,
+ * enable L4 TCP/UDP checksum verification.
+ */
+ nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+
+ rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &nixlf->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->qints < 1)
+ return -ENXIO;
+
+ return rsp->hdr.rc;
+}
+
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ int sqb, qidx;
+ u64 iova, pa;
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
+ if (!sq->sqb_ptrs[sqb])
+ continue;
+ iova = sq->sqb_ptrs[sqb];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ }
+ sq->sqb_count = 0;
+ }
+}
+
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
+{
+ int pool_id, pool_start = 0, pool_end = 0, size = 0;
+ u64 iova, pa;
+
+ if (type == AURA_NIX_SQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pool_start + pfvf->hw.sqpool_cnt;
+ size = pfvf->hw.sqb_size;
+ }
+ if (type == AURA_NIX_RQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pfvf->hw.rqpool_cnt;
+ size = pfvf->rbsize;
+ }
+
+ /* Free SQB and RQB pointers from the aura pool */
+ for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ while (iova) {
+ if (type == AURA_NIX_RQ)
+ iova -= OTX2_HEAD_ROOM;
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ }
+ }
+}
+
+void otx2_aura_pool_free(struct otx2_nic *pfvf)
+{
+ struct otx2_pool *pool;
+ int pool_id;
+
+ if (!pfvf->qset.pool)
+ return;
+
+ for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ }
+ devm_kfree(pfvf->dev, pfvf->qset.pool);
+}
+
+static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
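The shift field programmed above is ilog2(numptrs) - 8, so an aura of 256 pointers gets shift 0 and larger auras scale up from there. A small check of the computation, with ilog2 modeled as the floor log2 the kernel helper yields for runtime values:

```c
#include <stdio.h>

/* Floor log2, as the kernel's ilog2() yields for runtime values */
static int ilog2_u32(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int numptrs[] = { 256, 1024, 4096 };
	int i;

	for (i = 0; i < 3; i++)
		printf("numptrs %u -> aura shift %d\n",
		       numptrs[i], ilog2_u32(numptrs[i]) - 8);
	return 0;
}
```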
+static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+ pool->rbpage_order = get_order(buf_size);
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ int qidx, pool_id, stack_pages, num_sqbs;
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ int err, ptr;
+ s64 bufptr;
+
+ /* Calculate the number of SQBs needed.
+ *
+ * For a 128-byte SQE and a 4K SQB, 31 SQEs will fit in one SQB;
+ * the last SQE is used to point to the next SQB.
+ */
+ num_sqbs = (hw->sqb_size / 128) - 1;
+ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
+
+ /* Get no of stack pages needed */
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+ if (err)
+ goto fail;
+
+ /* Initialize pool context */
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_sqbs, hw->sqb_size);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ sq = &qset->sq[qidx];
+ sq->sqb_count = 0;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ if (!sq->sqb_ptrs)
+ return -ENOMEM;
+
+ for (ptr = 0; ptr < num_sqbs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int stack_pages, pool_id, rq;
+ struct otx2_pool *pool;
+ int err, ptr, num_ptrs;
+ s64 bufptr;
+
+ num_ptrs = pfvf->qset.rqe_cnt;
+
+ stack_pages =
+ (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (rq = 0; rq < hw->rx_queues; rq++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
+ if (err)
+ goto fail;
+ }
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_ptrs, pfvf->rbsize);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ for (ptr = 0; ptr < num_ptrs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_config_npa(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct npa_lf_alloc_req *npalf;
+ struct otx2_hw *hw = &pfvf->hw;
+ int aura_cnt;
+
+ /* Pool - Stack of free buffer pointers
+ * Aura - Alloc/frees pointers from/to pool for NIX DMA.
+ */
+
+ if (!hw->pool_cnt)
+ return -EINVAL;
+
+ qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
+ hw->pool_cnt, GFP_KERNEL);
+ if (!qset->pool)
+ return -ENOMEM;
+
+ /* Get memory to put this msg */
+ npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
+ if (!npalf)
+ return -ENOMEM;
+
+ /* Set aura and pool counts */
+ npalf->nr_pools = hw->pool_cnt;
+ aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
+ npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
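The aura_sz computed above encodes the power-of-two-rounded aura count for the AF; the meaning of the encoded values is defined on the AF side and not visible in this patch, but the arithmetic itself can be sanity-checked in isolation:

```c
#include <stdio.h>

/* Floor log2, matching the kernel's ilog2() for runtime values */
static int ilog2_u32(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static unsigned int roundup_pow_of_two_u32(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int pool_cnt[] = { 20, 128, 300 };
	int i;

	for (i = 0; i < 3; i++) {
		int aura_cnt = ilog2_u32(roundup_pow_of_two_u32(pool_cnt[i]));
		int aura_sz = (aura_cnt >= ilog2_u32(128)) ?
			      (aura_cnt - 6) : 1;

		printf("pool_cnt %u -> aura_sz %d\n", pool_cnt[i], aura_sz);
	}
	return 0;
}
```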
+int otx2_detach_resources(struct mbox *mbox)
+{
+ struct rsrc_detach *detach;
+
+ otx2_mbox_lock(mbox);
+ detach = otx2_mbox_alloc_msg_detach_resources(mbox);
+ if (!detach) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+
+ /* detach all */
+ detach->partial = false;
+
+ /* Send detach request to AF */
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_mbox_unlock(mbox);
+ return 0;
+}
+
+int otx2_attach_npa_nix(struct otx2_nic *pfvf)
+{
+ struct rsrc_attach *attach;
+ struct msg_req *msix;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
+ if (!attach) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ attach->npalf = true;
+ attach->nixlf = true;
+
+ /* Send attach request to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+
+ /* If the platform has two NIX blocks then LF may be
+ * allocated from NIX1.
+ */
+ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+
+ /* Get NPA and NIX MSIX vector offsets */
+ msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
+ if (!msix) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
+ pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
+ dev_err(pfvf->dev,
+ "RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
+{
+ struct hwctx_disable_req *req;
+
+ otx2_mbox_lock(mbox);
+ /* Request AQ to disable this context */
+ if (npa)
+ req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
+
+ if (!req) {
+ otx2_mbox_unlock(mbox);
+ return;
+ }
+
+ req->ctype = type;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
+ otx2_mbox_unlock(mbox);
+}
+
+/* Mbox message handlers */
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp)
+{
+ int id;
+
+ for (id = 0; id < CGX_RX_STATS_COUNT; id++)
+ pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
+ for (id = 0; id < CGX_TX_STATS_COUNT; id++)
+ pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
+}
+
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ int lvl, schq;
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (schq = 0; schq < rsp->schq[lvl]; schq++)
+ pf->hw.txschq_list[lvl][schq] =
+ rsp->schq_list[lvl][schq];
+}
+
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
+ pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
+}
+
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.sqb_size = rsp->sqb_size;
+ pfvf->hw.rx_chan_base = rsp->rx_chan_base;
+ pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
+ pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+}
+
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp)
+{
+ pfvf->hw.npa_msixoff = rsp->npa_msixoff;
+ pfvf->hw.nix_msixoff = rsp->nix_msixoff;
+}
+
+void otx2_free_cints(struct otx2_nic *pfvf, int n)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ int irq, qidx;
+
+ for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ qidx < n;
+ qidx++, irq++) {
+ int vector = pci_irq_vector(pfvf->pdev, irq);
+
+ irq_set_affinity_hint(vector, NULL);
+ free_cpumask_var(hw->affinity_mask[irq]);
+ free_irq(vector, &qset->napi[qidx]);
+ }
+}
+
+void otx2_set_cints_affinity(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int vec, cpu, irq, cint;
+
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* CQ interrupts */
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
+ if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
+ return;
+
+ cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
+
+ irq = pci_irq_vector(pfvf->pdev, vec);
+ irq_set_affinity_hint(irq, hw->affinity_mask[vec]);
+
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (unlikely(cpu >= nr_cpu_ids))
+ cpu = 0;
+ }
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int __weak \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp) \
+{ \
+ /* Nothing to do here */ \
+ return 0; \
+} \
+EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
+MBOX_UP_CGX_MESSAGES
+#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
new file mode 100644
index 000000000000..320f3b7bf57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -0,0 +1,615 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_COMMON_H
+#define OTX2_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/iommu.h>
+
+#include <mbox.h>
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+
+#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+enum arua_mapped_qtypes {
+ AURA_NIX_RQ,
+ AURA_NIX_SQ,
+};
+
+/* NIX LF interrupts range */
+#define NIX_LF_QINT_VEC_START 0x00
+#define NIX_LF_CINT_VEC_START 0x40
+#define NIX_LF_GINT_VEC 0x80
+#define NIX_LF_ERR_VEC 0x81
+#define NIX_LF_POISON_VEC 0x82
+
+/* RSS configuration */
+struct otx2_rss_info {
+ u8 enable;
+ u32 flowkey_cfg;
+ u16 rss_size;
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
+ u8 key[RSS_HASH_KEY_SIZE];
+};
+
+/* NIX (or NPC) RX errors */
+enum otx2_errlvl {
+ NPC_ERRLVL_RE,
+ NPC_ERRLVL_LID_LA,
+ NPC_ERRLVL_LID_LB,
+ NPC_ERRLVL_LID_LC,
+ NPC_ERRLVL_LID_LD,
+ NPC_ERRLVL_LID_LE,
+ NPC_ERRLVL_LID_LF,
+ NPC_ERRLVL_LID_LG,
+ NPC_ERRLVL_LID_LH,
+ NPC_ERRLVL_NIX = 0x0F,
+};
+
+enum otx2_errcodes_re {
+ /* NPC_ERRLVL_RE errcodes */
+ ERRCODE_FCS = 0x7,
+ ERRCODE_FCS_RCV = 0x8,
+ ERRCODE_UNDERSIZE = 0x10,
+ ERRCODE_OVERSIZE = 0x11,
+ ERRCODE_OL2_LEN_MISMATCH = 0x12,
+ /* NPC_ERRLVL_NIX errcodes */
+ ERRCODE_OL3_LEN = 0x10,
+ ERRCODE_OL4_LEN = 0x11,
+ ERRCODE_OL4_CSUM = 0x12,
+ ERRCODE_IL3_LEN = 0x20,
+ ERRCODE_IL4_LEN = 0x21,
+ ERRCODE_IL4_CSUM = 0x22,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+struct otx2_dev_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
+ u64 rx_drops;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+/* Driver counted stats */
+struct otx2_drv_stats {
+ atomic_t rx_fcs_errs;
+ atomic_t rx_oversize_errs;
+ atomic_t rx_undersize_errs;
+ atomic_t rx_csum_errs;
+ atomic_t rx_len_errs;
+ atomic_t rx_other_errs;
+};
+
+struct mbox {
+ struct otx2_mbox mbox;
+ struct work_struct mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct work_struct mbox_up_wrk;
+ struct otx2_nic *pfvf;
+ void *bbuf_base; /* Bounce buffer for mbox memory */
+ struct mutex lock; /* serialize mailbox access */
+ int num_msgs; /* mbox number of messages */
+ int up_num_msgs; /* mbox_up number of messages */
+};
+
+struct otx2_hw {
+ struct pci_dev *pdev;
+ struct otx2_rss_info rss_info;
+ u16 rx_queues;
+ u16 tx_queues;
+ u16 max_queues;
+ u16 pool_cnt;
+ u16 rqpool_cnt;
+ u16 sqpool_cnt;
+
+ /* NPA */
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 sqb_size;
+
+ /* NIX */
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+
+ /* HW settings, coalescing etc */
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u16 cq_qcount_wait;
+ u16 cq_ecount_wait;
+ u16 rq_skid;
+ u8 cq_time_wait;
+
+ /* For TSO segmentation */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 hw_tso;
+
+ /* MSI-X */
+ u8 cint_cnt; /* CQ interrupt count */
+ u16 npa_msixoff; /* Offset of NPA vectors */
+ u16 nix_msixoff; /* Offset of NIX vectors */
+ char *irq_name;
+ cpumask_var_t *affinity_mask;
+
+ /* Stats */
+ struct otx2_dev_stats dev_stats;
+ struct otx2_drv_stats drv_stats;
+ u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
+ u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct refill_work {
+ struct delayed_work pool_refill_work;
+ struct otx2_nic *pf;
+};
+
+struct otx2_nic {
+ void __iomem *reg_base;
+ struct net_device *netdev;
+ void *iommu_domain;
+ u16 max_frs;
+ u16 rbsize; /* Receive buffer size */
+
+#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+ u64 flags;
+
+ struct otx2_qset qset;
+ struct otx2_hw hw;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* Mbox */
+ struct mbox mbox;
+ struct workqueue_struct *mbox_wq;
+
+ u16 pcifunc; /* RVU PF_FUNC */
+ struct cgx_link_user_info linfo;
+
+ u64 reset_count;
+ struct work_struct reset_task;
+ struct refill_work *refill_wrk;
+
+ /* Ethtool stuff */
+ u32 msg_enable;
+
+ /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
+ int nix_blkaddr;
+};
+
+static inline bool is_96xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline bool is_96xx_B0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x01) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+
+ pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
+
+ hw->hw_tso = true;
+
+ if (is_96xx_A0(pfvf->pdev)) {
+ hw->hw_tso = false;
+
+ /* Time based irq coalescing is not supported */
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+ /* Due to a HW issue, earlier silicon revisions require a
+ * minimum of 600 unused CQEs to avoid CQ overflow.
+ */
+ pfvf->hw.rq_skid = 600;
+ pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
+ }
+}
+
+/* Register read/write APIs */
+static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
+{
+ u64 blkaddr;
+
+ switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
+ case BLKTYPE_NIX:
+ blkaddr = nic->nix_blkaddr;
+ break;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ break;
+ default:
+ blkaddr = BLKADDR_RVUM;
+ break;
+ }
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
+
+ return nic->reg_base + offset;
+}
+
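otx2_get_regaddr() above substitutes the block-address field embedded in a register offset, so the same NIX register offsets work whether the LF sits on NIX0 or NIX1. The sketch below shows the bit surgery with assumed field parameters (a 20-bit shift and 0x1F mask; the real RVU_FUNC_BLKADDR_SHIFT/MASK live in otx2_reg.h, which is not part of this hunk).

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values; the real ones are defined in otx2_reg.h */
#define BLKADDR_SHIFT 20
#define BLKADDR_MASK  0x1FULL

/* Replace the block-address bits of an offset, as in otx2_get_regaddr() */
static uint64_t rewrite_blkaddr(uint64_t offset, uint64_t blkaddr)
{
	offset &= ~(BLKADDR_MASK << BLKADDR_SHIFT);
	offset |= blkaddr << BLKADDR_SHIFT;
	return offset;
}

int main(void)
{
	/* Offset for register 0x100 in block 4 */
	uint64_t off = (0x4ULL << BLKADDR_SHIFT) | 0x100;

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)off,
	       (unsigned long long)rewrite_blkaddr(off, 0x5));
	return 0;
}
```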
+static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ writeq(val, addr);
+}
+
+static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ return readq(addr);
+}
+
+/* Mbox bounce buffer APIs */
+static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
+{
+ struct otx2_mbox *otx2_mbox;
+ struct otx2_mbox_dev *mdev;
+
+ mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!mbox->bbuf_base)
+ return -ENOMEM;
+
+ /* Overwrite mbox mbase to point to the bounce buffer, so that the
+ * PF/VF prepares all mbox messages in the bounce buffer instead of
+ * directly in HW mbox memory.
+ */
+ otx2_mbox = &mbox->mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+
+ otx2_mbox = &mbox->mbox_up;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+ return 0;
+}
+
+static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
+static inline void otx2_mbox_lock_init(struct mbox *mbox)
+{
+ mutex_init(&mbox->lock);
+}
+
+static inline void otx2_mbox_lock(struct mbox *mbox)
+{
+ mutex_lock(&mbox->lock);
+}
+
+static inline void otx2_mbox_unlock(struct mbox *mbox)
+{
+ mutex_unlock(&mbox->lock);
+}
+
+/* In the absence of an API for 128-bit IO memory access on arm64,
+ * implement the required operations in place.
+ */
+#if defined(CONFIG_ARM64)
+static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
+{
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+}
+
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+
+static inline u64 otx2_lmt_flush(uint64_t addr)
+{
+ u64 result = 0;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldeor xzr,%x[rf],[%[rs]]"
+ : [rf]"=r"(result)
+ : [rs]"r"(addr));
+ return result;
+}
+
+#else
+/* Stubs for build coverage on non-arm64. Note that, unlike LDADD
+ * (which returns the value prior to the addition), the fallback
+ * otx2_atomic64_add() evaluates to the post-add value.
+ */
+#define otx2_write128(lo, hi, addr)
+#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+#define otx2_lmt_flush(addr) ({ 0; })
+#endif
+
+/* Alloc pointer from pool/aura */
+static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+{
+ u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
+ NPA_LF_AURA_OP_ALLOCX(0));
+ u64 incr = (u64)aura | BIT_ULL(63);
+
+ return otx2_atomic64_add(incr, ptr);
+}
+
+/* Free pointer to a pool/aura */
+static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
+ int aura, s64 buf)
+{
+ otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
+ otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+}
+
+/* Update page ref count */
+static inline void otx2_get_page(struct otx2_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ if (pool->pageref)
+ page_ref_add(pool->page, pool->pageref);
+ pool->pageref = 0;
+ pool->page = NULL;
+}
+
+static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
+{
+ if (type == AURA_NIX_SQ)
+ return pfvf->hw.rqpool_cnt + idx;
+
+ /* AURA_NIX_RQ */
+ return idx;
+}
+
+/* Mbox APIs */
+static inline int otx2_sync_mbox_msg(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
+}
+
+/* Use this API to send mbox msgs in atomic context
+ * where sleeping is not allowed
+ */
+static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &mbox->mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp); \
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Time to wait before watchdog kicks off */
+#define OTX2_TX_TIMEOUT (100 * HZ)
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+static inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t iova;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_unmap_page_attrs(pfvf->dev, addr, size,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+/* MSI-X APIs */
+void otx2_free_cints(struct otx2_nic *pfvf, int n);
+void otx2_set_cints_affinity(struct otx2_nic *pfvf);
+int otx2_set_mac_address(struct net_device *netdev, void *p);
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
+void otx2_get_mac_from_af(struct net_device *netdev);
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+
+/* RVU block related APIs */
+int otx2_attach_npa_nix(struct otx2_nic *pfvf);
+int otx2_detach_resources(struct mbox *mbox);
+int otx2_config_npa(struct otx2_nic *pfvf);
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
+void otx2_aura_pool_free(struct otx2_nic *pfvf);
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
+int otx2_config_nix(struct otx2_nic *pfvf);
+int otx2_config_nix_queues(struct otx2_nic *pfvf);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txsch_alloc(struct otx2_nic *pfvf);
+int otx2_txschq_stop(struct otx2_nic *pfvf);
+void otx2_sqb_flush(struct otx2_nic *pfvf);
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp);
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+
+/* RSS configuration APIs*/
+int otx2_rss_init(struct otx2_nic *pfvf);
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
+void otx2_set_rss_key(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf);
+
+/* Mbox handlers */
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp);
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp);
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp);
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp);
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp);
+
+/* Device stats APIs */
+void otx2_get_dev_stats(struct otx2_nic *pfvf);
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
+void otx2_set_ethtool_ops(struct net_device *netdev);
+
+int otx2_open(struct net_device *netdev);
+int otx2_stop(struct net_device *netdev);
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
new file mode 100644
index 000000000000..60fcf82dd8cb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/stddef.h>
+#include <linux/etherdevice.h>
+#include <linux/log2.h>
+
+#include "otx2_common.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+
+struct otx2_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+/* HW device stats */
+#define OTX2_DEV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
+}
+
+static const struct otx2_stat otx2_dev_stats[] = {
+ OTX2_DEV_STAT(rx_ucast_frames),
+ OTX2_DEV_STAT(rx_bcast_frames),
+ OTX2_DEV_STAT(rx_mcast_frames),
+
+ OTX2_DEV_STAT(tx_ucast_frames),
+ OTX2_DEV_STAT(tx_bcast_frames),
+ OTX2_DEV_STAT(tx_mcast_frames),
+};
+
+/* Driver level stats */
+#define OTX2_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
+}
+
+static const struct otx2_stat otx2_drv_stats[] = {
+ OTX2_DRV_STAT(rx_fcs_errs),
+ OTX2_DRV_STAT(rx_oversize_errs),
+ OTX2_DRV_STAT(rx_undersize_errs),
+ OTX2_DRV_STAT(rx_csum_errs),
+ OTX2_DRV_STAT(rx_len_errs),
+ OTX2_DRV_STAT(rx_other_errs),
+};
+
+static const struct otx2_stat otx2_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
+static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
+static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+
+static void otx2_dev_open(struct net_device *netdev)
+{
+ otx2_open(netdev);
+}
+
+static void otx2_dev_stop(struct net_device *netdev)
+{
+ otx2_stop(netdev);
+}
+
+static void otx2_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
+{
+ int start_qidx = qset * pfvf->hw.rx_queues;
+ int qidx, stats;
+
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(pfvf, &data, 0);
+
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2_get_qset_stats(struct otx2_nic *pfvf,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!pfvf)
+ return;
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ if (!otx2_update_rq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ if (!otx2_update_sq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+}
+
+/* Get device and per queue statistics */
+static void otx2_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(pfvf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&pfvf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(pfvf, stats, &data);
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ *(data++) = pfvf->reset_count;
+}
+
+static int otx2_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+}
+
+/* Get the number of queues the device supports and the current queue count */
+static void otx2_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ channel->max_rx = pfvf->hw.max_queues;
+ channel->max_tx = pfvf->hw.max_queues;
+
+ channel->rx_count = pfvf->hw.rx_queues;
+ channel->tx_count = pfvf->hw.tx_queues;
+}
+
+/* Set no of Tx, Rx queues to be used */
+static int otx2_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ bool if_up = netif_running(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ if (if_up)
+ otx2_dev_stop(dev);
+
+ err = otx2_set_real_num_queues(dev, channel->tx_count,
+ channel->rx_count);
+ if (err)
+ goto fail;
+
+ pfvf->hw.rx_queues = channel->rx_count;
+ pfvf->hw.tx_queues = channel->tx_count;
+ pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
+
+fail:
+ if (if_up)
+ otx2_dev_open(dev);
+
+ netdev_info(dev, "Set num Tx rings to %d, Rx rings to %d\n",
+ pfvf->hw.tx_queues, pfvf->hw.rx_queues);
+
+ return err;
+}
+
+static void otx2_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+
+ ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+}
+
+static int otx2_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+ u32 rx_count, tx_count;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* Permitted lengths are 16, 64, 256, 1K, 4K, 16K, 64K, 256K and 1M */
+ rx_count = ring->rx_pending;
+ /* On some silicon variants a skid or reserved CQEs are
+ * needed to avoid CQ overflow.
+ */
+ if (rx_count < pfvf->hw.rq_skid)
+ rx_count = pfvf->hw.rq_skid;
+ rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
+
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow; hence the
+ * minimum 4K size.
+ */
+ tx_count = clamp_t(u32, ring->tx_pending,
+ Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
+ tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
+
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ return 0;
+
+ if (if_up)
+ otx2_dev_stop(netdev);
+
+ /* Counts are already rounded to the nearest supported queue size. */
+ qs->sqe_cnt = tx_count;
+ qs->rqe_cnt = rx_count;
+
+ if (if_up)
+ otx2_dev_open(netdev);
+ return 0;
+}
+
+static int otx2_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+
+ cmd->rx_coalesce_usecs = hw->cq_time_wait;
+ cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
+ cmd->tx_coalesce_usecs = hw->cq_time_wait;
+ cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+
+ return 0;
+}
+
+static int otx2_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx;
+
+ if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
+ ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
+ return -EOPNOTSUPP;
+
+ if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
+ return 0;
+
+ /* 'cq_time_wait' is 8bit and in multiples of 100ns,
+ * so clamp the user-given value to the range of 1 to 25 usecs.
+ */
+ ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+ ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+
+ /* Rx and Tx are mapped to the same CQ; check which one
+ * changed and, if both, choose the min.
+ */
+ if (hw->cq_time_wait == ec->rx_coalesce_usecs)
+ hw->cq_time_wait = ec->tx_coalesce_usecs;
+ else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
+ hw->cq_time_wait = ec->rx_coalesce_usecs;
+ else
+ hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
+ ec->tx_coalesce_usecs);
+
+ /* Max ecount_wait supported is 16bit,
+ * so clamp the user-given value to the range of 1 to 64K.
+ */
+ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
+ 1, U16_MAX);
+ ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
+ 1, U16_MAX);
+
+ /* Rx and Tx are mapped to the same CQ; check which one
+ * changed and, if both, choose the min.
+ */
+ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
+ else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
+ else
+ hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
+ ec->tx_max_coalesced_frames);
+
+ if (netif_running(netdev)) {
+ for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
+ otx2_config_irq_coalescing(pfvf, qidx);
+ }
+
+ return 0;
+}
+
+static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+
+ if (!(rss->flowkey_cfg &
+ (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
+ return 0;
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
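+/* Example (hypothetical interface name):
+ *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
+ * enables 4-tuple hashing for TCP; as the comment below notes, v4 and
+ * v6 cannot be configured differently.
+ */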
+static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ u32 rss_cfg = rss->flowkey_cfg;
+
+ if (!rss->enable) {
+ netdev_err(pfvf->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* Different config for v4 and v6 is not supported.
+ * Both of them have to be either 4-tuple or 2-tuple.
+ */
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rss->flowkey_cfg = rss_cfg;
+ otx2_set_flowkey_cfg(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
+
+ return sizeof(rss->key);
+}
+
+static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ return pfvf->hw.rss_info.rss_size;
+}
+
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
+
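+	/* Toeplitz (ETH_RSS_HASH_TOP) is the only supported hash function */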
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
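+/* Example (hypothetical interface name): "ethtool -X eth0 equal 8"
+ * spreads the indirection table evenly over the first 8 queues.
+ */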
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+
+ otx2_set_rss_table(pfvf);
+ return 0;
+}
+
+static u32 otx2_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->msg_enable;
+}
+
+static void otx2_set_msglevel(struct net_device *netdev, u32 val)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ pfvf->msg_enable = val;
+}
+
+static u32 otx2_get_link(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->linfo.link_up;
+}
+
+static const struct ethtool_ops otx2_ethtool_ops = {
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2_get_drvinfo,
+ .get_strings = otx2_get_strings,
+ .get_ethtool_stats = otx2_get_ethtool_stats,
+ .get_sset_count = otx2_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+};
+
+void otx2_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
new file mode 100644
index 000000000000..85f9b9ba6bd5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_txrx.h"
+#include "otx2_struct.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id otx2_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+
+enum {
+ TYPE_PFAF,
+ TYPE_PFVF,
+};
+
+static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2_open(netdev);
+
+ return err;
+}
+
+static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr, int type)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* The interrupt bit for device 'i' is relative to 'first' */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+		/* The hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler to ensure that it holds a correct value next time
+		 * the interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler,
+		 * pf->mbox.up_num_msgs holds the data for use in
+		 * pfaf_mbox_up_handler.
+		 */
+ if (hdr->num_msgs) {
+ mw[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_wrk);
+ }
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ }
+ }
+}
+
+static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ pf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(pf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_CGX_STATS:
+ mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+}
+
+static void otx2_pfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ struct otx2_nic *pf;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ pf = af_mbox->pfvf;
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2_process_pfaf_mbox_msg(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static void otx2_handle_link_event(struct otx2_nic *pf)
+{
+ struct cgx_link_user_info *linfo = &pf->linfo;
+ struct net_device *netdev = pf->netdev;
+
+ pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed,
+ linfo->full_duplex ? "Full" : "Half");
+ if (linfo->link_up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ /* Copy the link info sent by AF */
+ pf->linfo = msg->link_info;
+
+ /* interface has not been fully configured yet */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
+ otx2_handle_link_event(pf);
+ return 0;
+}
+
+static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
+ struct mbox_msghdr *req)
+{
+	/* Check if valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
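+	/* Each MBOX_UP_CGX_MESSAGES entry expands, via the M() macro
+	 * below, into a case that allocates a response, fills its header
+	 * and dispatches to the matching up-handler.
+	 */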
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &pf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ pf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
+ default:
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_mbox *mbox = &af_mbox->mbox_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_nic *pf = af_mbox->pfvf;
+ int offset, id, devid = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ /* Skip processing VF's messages */
+ if (!devid)
+ otx2_process_mbox_msg_up(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ struct mbox *mbox;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+
+ mbox = &pf->mbox;
+ otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+{
+ int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+
+ /* Disable AF => PF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, pf);
+}
+
+static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from AF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_af)
+ return 0;
+
+ /* Check mailbox communication with AF */
+ req = otx2_mbox_alloc_msg_ready(&pf->mbox);
+ if (!req) {
+ otx2_disable_mbox_intr(pf);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2_disable_mbox_intr(pf);
+ return -EPROBE_DEFER;
+ }
+
+ return 0;
+}
+
+static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+
+ if (pf->mbox_wq) {
+ flush_workqueue(pf->mbox_wq);
+ destroy_workqueue(pf->mbox_wq);
+ pf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = pf;
+ pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_wq)
+ return -ENOMEM;
+
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * the admin function (i.e. AF) and this PF; it shouldn't be
+	 * mapped as device memory, so that unaligned accesses are
+	 * allowed.
+	 */
+ hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, pf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
+ otx2_mbox_lock_init(&pf->mbox);
+
+ return 0;
+exit:
+ otx2_pfaf_mbox_destroy(pf);
+ return err;
+}
+
+static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+{
+ struct otx2_nic *pf = data;
+ u64 val, *ptr;
+ u64 qidx = 0;
+
+ /* CQ */
+ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
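+		/* The queue index is encoded at bit 44 of the value written
+		 * to the OP_INT register; the atomic add returns that queue's
+		 * interrupt bits, with bit 42 flagging a read error.
+		 */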
+ ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+
+ otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
+ (val & NIX_CQERRINT_BITS));
+ if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+ netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ /* SQ */
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
+ (val & NIX_SQINT_BITS));
+
+ if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SQ_OP_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
+ qidx,
+ otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SEND_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+{
+ struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
+ struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
+ int qidx = cq_poll->cint_idx;
+
+	/* Disable interrupts.
+	 *
+	 * Completion interrupts are level triggered, hence they must
+	 * be cleared only after being serviced.
+	 */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ /* Schedule NAPI */
+ napi_schedule_irqoff(&cq_poll->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_napi(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+}
+
+static void otx2_free_cq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_queue *cq;
+ int qidx;
+
+ /* Disable CQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ qmem_free(pf->dev, cq->cqe);
+ }
+}
+
+static void otx2_free_sq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_snd_queue *sq;
+ int qidx;
+
+ /* Disable SQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ /* Free SQB pointers */
+ otx2_sq_free_sqbs(pf);
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->tso_hdrs);
+ kfree(sq->sg);
+ kfree(sq->sqb_ptrs);
+ }
+}
+
+static int otx2_init_hw_resources(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ int err = 0, lvl;
+
+	/* Set required NPA LF's pool counts.
+	 * Auras and pools are used in a 1:1 mapping,
+	 * so aura count = pool count.
+	 */
+ hw->rqpool_cnt = hw->rx_queues;
+ hw->sqpool_cnt = hw->tx_queues;
+ hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+
+ /* Get the size of receive buffers to allocate */
+ pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+
+ otx2_mbox_lock(mbox);
+ /* NPA init */
+ err = otx2_config_npa(pf);
+ if (err)
+ goto exit;
+
+ /* NIX init */
+ err = otx2_config_nix(pf);
+ if (err)
+ goto err_free_npa_lf;
+
+ /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
+ err = otx2_rq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_lf;
+ }
+ /* Init Auras and pools used by NIX SQ, for queueing SQEs */
+ err = otx2_sq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_rq_ptrs;
+ }
+
+ err = otx2_txsch_alloc(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_sq_ptrs;
+ }
+
+ err = otx2_config_nix_queues(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_txsch;
+ }
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pf, lvl);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_queues;
+ }
+ }
+ otx2_mbox_unlock(mbox);
+ return err;
+
+err_free_nix_queues:
+ otx2_free_sq_res(pf);
+ otx2_free_cq_res(pf);
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+err_free_txsch:
+ if (otx2_txschq_stop(pf))
+ dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
+err_free_sq_ptrs:
+ otx2_sq_free_sqbs(pf);
+err_free_rq_ptrs:
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+err_free_nix_lf:
+ otx2_mbox_lock(mbox);
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+err_free_npa_lf:
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+exit:
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+static void otx2_free_hw_resources(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_cq_queue *cq;
+ struct msg_req *req;
+ int qidx, err;
+
+ /* Ensure all SQE are processed */
+ otx2_sqb_flush(pf);
+
+ /* Stop transmission */
+ err = otx2_txschq_stop(pf);
+ if (err)
+ dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+
+ /* Disable RQs */
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+
+	/* Dequeue all CQEs */
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ if (cq->cq_type == CQ_RX)
+ otx2_cleanup_rx_cqes(pf, cq);
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
+
+ otx2_free_sq_res(pf);
+
+	/* Free RQ buffer pointers */
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+
+ otx2_free_cq_res(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NIX LF */
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+
+ /* Disable NPA Pool and Aura hw context */
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+}
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+	/* RQs and SQs are mapped to different CQs,
+	 * so find out the max number of CQ IRQs (i.e. CINTs) needed.
+	 */
+ pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
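+	/* e.g. with 8 Rx and 8 Tx queues there are 16 CQs but only
+	 * 8 CINTs, each servicing one RQ/SQ CQ pair.
+	 */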
+ qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* CQ size of RQ */
+ qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ /* CQ size of SQ */
+ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+
+ err = -ENOMEM;
+ qset->cq = kcalloc(pf->qset.cq_cnt,
+ sizeof(struct otx2_cq_queue), GFP_KERNEL);
+ if (!qset->cq)
+ goto err_free_mem;
+
+ qset->sq = kcalloc(pf->hw.tx_queues,
+ sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ if (!qset->sq)
+ goto err_free_mem;
+
+ qset->rq = kcalloc(pf->hw.rx_queues,
+ sizeof(struct otx2_rcv_queue), GFP_KERNEL);
+ if (!qset->rq)
+ goto err_free_mem;
+
+ err = otx2_init_hw_resources(pf);
+ if (err)
+ goto err_free_mem;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+		/* RQ0 & SQ0 are mapped to CINT0 and so on:
+		 * 'cq_ids[0]' points to the RQ's CQ and
+		 * 'cq_ids[1]' points to the SQ's CQ.
+		 */
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ cq_poll->dev = (void *)pf;
+ netif_napi_add(netdev, &cq_poll->napi,
+ otx2_napi_handler, NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ }
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(pf, netdev->mtu);
+ if (err)
+ goto err_disable_napi;
+
+ /* Initialize RSS */
+ err = otx2_rss_init(pf);
+ if (err)
+ goto err_disable_napi;
+
+ /* Register Queue IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_q_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for QERR\n",
+ rvu_get_pf(pf->pcifunc));
+ goto err_disable_napi;
+ }
+
+ /* Enable QINT IRQ */
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+
+ /* Register CQ IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
+ qidx);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ otx2_config_irq_coalescing(pf, qidx);
+
+		/* Clear any pending interrupt and enable the CQ IRQ */
+ otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+
+ otx2_set_cints_affinity(pf);
+
+ pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* we have already received link status notification */
+ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_handle_link_event(pf);
+
+ err = otx2_rxtx_enable(pf, true);
+ if (err)
+ goto err_free_cints;
+
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(pf, qidx);
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+err_disable_napi:
+ otx2_disable_napi(pf);
+ otx2_free_hw_resources(pf);
+err_free_mem:
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ return err;
+}
+
+int otx2_stop(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int qidx, vec, wrk;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* First stop packet Rx/Tx */
+ otx2_rxtx_enable(pf, false);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(pf->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+
+ netif_tx_disable(netdev);
+
+ otx2_free_hw_resources(pf);
+ otx2_free_cints(pf, pf->hw.cint_cnt);
+ otx2_disable_napi(pf);
+
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ /* Do not clear RQ/SQ ringsize settings */
+ memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
+ sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ return 0;
+}
+
+static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+		/* Check again, in case SQBs got freed up between the failed
+		 * append and the queue stop; the barrier prevents a missed
+		 * wakeup from the completion path.
+		 */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void otx2_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct nix_rx_mode *req;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ otx2_mbox_lock(&pf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pf->mbox);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ /* We don't support MAC address filtering yet */
+ if (netdev->flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+}
+
+static int otx2_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return otx2_cgx_config_loopback(pf,
+ features & NETIF_F_LOOPBACK);
+ return 0;
+}
+
+static void otx2_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
+
+ if (!netif_running(pf->netdev))
+ return;
+
+ otx2_stop(pf->netdev);
+ pf->reset_count++;
+ otx2_open(pf->netdev);
+ netif_trans_update(pf->netdev);
+}
+
+static const struct net_device_ops otx2_netdev_ops = {
+ .ndo_open = otx2_open,
+ .ndo_stop = otx2_stop,
+ .ndo_start_xmit = otx2_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2_change_mtu,
+ .ndo_set_rx_mode = otx2_set_rx_mode,
+ .ndo_set_features = otx2_set_features,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_get_stats64 = otx2_get_stats64,
+};
+
+static int otx2_check_pf_usable(struct otx2_nic *nic)
+{
+ u64 rev;
+
+ rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+	/* Check if AF has set up the revision for the RVUM block;
+	 * otherwise this driver probe should be deferred
+	 * until the AF driver comes up.
+	 */
+ if (!rev) {
+ dev_warn(nic->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
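+/* Probe initially allocates only the RVU PF vectors needed for the AF
+ * mailbox; once the NPA/NIX LFs are attached the MSIX offsets are known,
+ * so the vectors are reallocated to also cover the queue interrupts.
+ */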
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+	/* NPA interrupts are not registered, so allocate only
+	 * up to the NIX vector offset.
+	 */
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2_disable_mbox_intr(pf);
+	pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2_register_mbox_intr(pf, false);
+}
+
+static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *pf;
+ struct otx2_hw *hw;
+ int err, qcount;
+ int num_vec;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* Set number of queues */
+ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+ netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pf = netdev_priv(netdev);
+ pf->netdev = netdev;
+ pf->pdev = pdev;
+ pf->dev = dev;
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+
+ hw = &pf->hw;
+ hw->pdev = pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name)
+ goto err_free_netdev;
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask)
+ goto err_free_netdev;
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ goto err_free_netdev;
+
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(pf);
+
+	/* Assign default MAC address */
+ otx2_get_mac_from_af(netdev);
+
+	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
+	 * HW allocates buffer pointers from the stack and uses them for
+	 * DMA'ing ingress packets. In some scenarios HW can free back the
+	 * allocated buffer pointers to the pool. This makes it impossible
+	 * for SW to maintain a parallel list where physical addresses of
+	 * buffer pointers (IOVAs) given to HW can be saved for later
+	 * reference.
+	 *
+	 * So the only way to convert an Rx packet's buffer address is to use
+	 * the IOMMU's iova_to_phys() handler, which translates the address
+	 * by walking through the translation tables.
+	 */
+ pf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2_netdev_ops;
+
+ /* MTU range: 64 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2_set_ethtool_ops(netdev);
+
+ /* Enable link notifications */
+ otx2_cgx_config_linkevents(pf, true);
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf;
+
+ if (!netdev)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ /* Disable link notifications */
+ otx2_cgx_config_linkevents(pf, false);
+
+ unregister_netdev(netdev);
+ otx2_detach_resources(&pf->mbox);
+ otx2_disable_mbox_intr(pf);
+ otx2_pfaf_mbox_destroy(pf);
+ pci_free_irq_vectors(pf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2_pf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_pf_id_table,
+ .probe = otx2_probe,
+ .shutdown = otx2_remove,
+ .remove = otx2_remove,
+};
+
+static int __init otx2_rvupf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2_pf_driver);
+}
+
+static void __exit otx2_rvupf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2_pf_driver);
+}
+
+module_init(otx2_rvupf_init_module);
+module_exit(otx2_rvupf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
new file mode 100644
index 000000000000..7963d418886a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_REG_H
+#define OTX2_REG_H
+
+#include <rvu_struct.h>
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
+/* NPA LF registers */
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
+#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
+#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
+#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30)
+#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50)
+#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60)
+#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70)
+#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100)
+#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110)
+#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120)
+#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128)
+#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130)
+#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138)
+#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160)
+#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170)
+#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200)
+#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208)
+#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210)
+#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218)
+#define NPA_LF_RAS (NPA_LFBASE | 0x220)
+#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228)
+#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230)
+#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238)
+#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12)
+#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12)
+#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+
+/* NIX LF registers */
+#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
+#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3)
+#define NIX_LF_CFG (NIX_LFBASE | 0x100)
+#define NIX_LF_GINT (NIX_LFBASE | 0x200)
+#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208)
+#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210)
+#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218)
+#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220)
+#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228)
+#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230)
+#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238)
+#define NIX_LF_RAS (NIX_LFBASE | 0x240)
+#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248)
+#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250)
+#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258)
+#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260)
+#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270)
+#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280)
+#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3)
+#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3)
+#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3)
+#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900)
+#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910)
+#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920)
+#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980)
+#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00)
+#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10)
+#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20)
+#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30)
+#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00)
+#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30)
+#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40)
+#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12)
+#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12)
+#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12)
+#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12)
+#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
+
+/* NIX AF transmit scheduler registers */
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+
+/* LMT LF registers */
+#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
+#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
+#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+
+#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
new file mode 100644
index 000000000000..cba59ddf71bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_STRUCT_H
+#define OTX2_STRUCT_H
+
+/* NIX WQE/CQE size 128 byte or 512 byte */
+enum nix_cqesz_e {
+ NIX_XQESZ_W64 = 0x0,
+ NIX_XQESZ_W16 = 0x1,
+};
+
+enum nix_sqes_e {
+ NIX_SQESZ_W16 = 0x0,
+ NIX_SQESZ_W8 = 0x1,
+};
+
+enum nix_send_ldtype {
+ NIX_SEND_LDTYPE_LDD = 0x0,
+ NIX_SEND_LDTYPE_LDT = 0x1,
+ NIX_SEND_LDTYPE_LDWB = 0x2,
+};
+
+/* CSUM offload */
+enum nix_sendl3type {
+ NIX_SENDL3TYPE_NONE = 0x0,
+ NIX_SENDL3TYPE_IP4 = 0x2,
+ NIX_SENDL3TYPE_IP4_CKSUM = 0x3,
+ NIX_SENDL3TYPE_IP6 = 0x4,
+};
+
+enum nix_sendl4type {
+ NIX_SENDL4TYPE_NONE,
+ NIX_SENDL4TYPE_TCP_CKSUM,
+ NIX_SENDL4TYPE_SCTP_CKSUM,
+ NIX_SENDL4TYPE_UDP_CKSUM,
+};
+
+/* NIX wqe/cqe types */
+enum nix_xqe_type {
+ NIX_XQE_TYPE_INVALID = 0x0,
+ NIX_XQE_TYPE_RX = 0x1,
+ NIX_XQE_TYPE_RX_IPSECS = 0x2,
+ NIX_XQE_TYPE_RX_IPSECH = 0x3,
+ NIX_XQE_TYPE_RX_IPSECD = 0x4,
+ NIX_XQE_TYPE_SEND = 0x8,
+};
+
+/* NIX CQE/SQE subdescriptor types */
+enum nix_subdc {
+ NIX_SUBDC_NOP = 0x0,
+ NIX_SUBDC_EXT = 0x1,
+ NIX_SUBDC_CRC = 0x2,
+ NIX_SUBDC_IMM = 0x3,
+ NIX_SUBDC_SG = 0x4,
+ NIX_SUBDC_MEM = 0x5,
+ NIX_SUBDC_JUMP = 0x6,
+ NIX_SUBDC_WORK = 0x7,
+ NIX_SUBDC_SOD = 0xf,
+};
+
+/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */
+enum nix_sendmemalg {
+ NIX_SENDMEMALG_E_SET = 0x0,
+ NIX_SENDMEMALG_E_SETTSTMP = 0x1,
+ NIX_SENDMEMALG_E_SETRSLT = 0x2,
+ NIX_SENDMEMALG_E_ADD = 0x8,
+ NIX_SENDMEMALG_E_SUB = 0x9,
+ NIX_SENDMEMALG_E_ADDLEN = 0xa,
+ NIX_SENDMEMALG_E_SUBLEN = 0xb,
+ NIX_SENDMEMALG_E_ADDMBUF = 0xc,
+ NIX_SENDMEMALG_E_SUBMBUF = 0xd,
+ NIX_SENDMEMALG_E_ENUM_LAST = 0xe,
+};
+
+/* NIX CQE header structure */
+struct nix_cqe_hdr_s {
+ u64 flow_tag : 32;
+ u64 q : 20;
+ u64 reserved_52_57 : 6;
+ u64 node : 2;
+ u64 cqe_type : 4;
+};
+
+/* NIX CQE RX parse structure */
+struct nix_rx_parse_s {
+ u64 chan : 12;
+ u64 desc_sizem1 : 5;
+ u64 rsvd_17 : 1;
+ u64 express : 1;
+ u64 wqwd : 1;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 latype : 4;
+ u64 lbtype : 4;
+ u64 lctype : 4;
+ u64 ldtype : 4;
+ u64 letype : 4;
+ u64 lftype : 4;
+ u64 lgtype : 4;
+ u64 lhtype : 4;
+ u64 pkt_lenm1 : 16; /* W1 */
+ u64 l2m : 1;
+ u64 l2b : 1;
+ u64 l3m : 1;
+ u64 l3b : 1;
+ u64 vtag0_valid : 1;
+ u64 vtag0_gone : 1;
+ u64 vtag1_valid : 1;
+ u64 vtag1_gone : 1;
+ u64 pkind : 6;
+ u64 rsvd_95_94 : 2;
+ u64 vtag0_tci : 16;
+ u64 vtag1_tci : 16;
+ u64 laflags : 8; /* W2 */
+ u64 lbflags : 8;
+ u64 lcflags : 8;
+ u64 ldflags : 8;
+ u64 leflags : 8;
+ u64 lfflags : 8;
+ u64 lgflags : 8;
+ u64 lhflags : 8;
+ u64 eoh_ptr : 8; /* W3 */
+ u64 wqe_aura : 20;
+ u64 pb_aura : 20;
+ u64 match_id : 16;
+ u64 laptr : 8; /* W4 */
+ u64 lbptr : 8;
+ u64 lcptr : 8;
+ u64 ldptr : 8;
+ u64 leptr : 8;
+ u64 lfptr : 8;
+ u64 lgptr : 8;
+ u64 lhptr : 8;
+ u64 vtag0_ptr : 8; /* W5 */
+ u64 vtag1_ptr : 8;
+ u64 flow_key_alg : 5;
+ u64 rsvd_383_341 : 43;
+ u64 rsvd_447_384; /* W6 */
+};
+
+/* NIX CQE RX scatter/gather subdescriptor structure */
+struct nix_rx_sg_s {
+ u64 seg_size : 16; /* W0 */
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_59_50 : 10;
+ u64 subdc : 4;
+ u64 seg_addr;
+ u64 seg2_addr;
+ u64 seg3_addr;
+};
+
+struct nix_send_comp_s {
+ u64 status : 8;
+ u64 sqe_id : 16;
+ u64 rsvd_24_63 : 40;
+};
+
+struct nix_cqe_rx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_rx_parse_s parse;
+ struct nix_rx_sg_s sg;
+};
+
+struct nix_cqe_tx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_send_comp_s comp;
+};
+
+/* NIX SQE header structure */
+struct nix_sqe_hdr_s {
+ u64 total : 18; /* W0 */
+ u64 reserved_18 : 1;
+ u64 df : 1;
+ u64 aura : 20;
+ u64 sizem1 : 3;
+ u64 pnc : 1;
+ u64 sq : 20;
+ u64 ol3ptr : 8; /* W1 */
+ u64 ol4ptr : 8;
+ u64 il3ptr : 8;
+ u64 il4ptr : 8;
+ u64 ol3type : 4;
+ u64 ol4type : 4;
+ u64 il3type : 4;
+ u64 il4type : 4;
+ u64 sqe_id : 16;
+
+};
+
+/* NIX send extended header subdescriptor structure */
+struct nix_sqe_ext_s {
+ u64 lso_mps : 14; /* W0 */
+ u64 lso : 1;
+ u64 tstmp : 1;
+ u64 lso_sb : 8;
+ u64 lso_format : 5;
+ u64 rsvd_31_29 : 3;
+ u64 shp_chg : 9;
+ u64 shp_dis : 1;
+ u64 shp_ra : 2;
+ u64 markptr : 8;
+ u64 markform : 7;
+ u64 mark_en : 1;
+ u64 subdc : 4;
+ u64 vlan0_ins_ptr : 8; /* W1 */
+ u64 vlan0_ins_tci : 16;
+ u64 vlan1_ins_ptr : 8;
+ u64 vlan1_ins_tci : 16;
+ u64 vlan0_ins_ena : 1;
+ u64 vlan1_ins_ena : 1;
+ u64 rsvd_127_114 : 14;
+};
+
+struct nix_sqe_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_54_50 : 5;
+ u64 i1 : 1;
+ u64 i2 : 1;
+ u64 i3 : 1;
+ u64 ld_type : 2;
+ u64 subdc : 4;
+};
+
+/* NIX send memory subdescriptor structure */
+struct nix_sqe_mem_s {
+ u64 offset : 16; /* W0 */
+ u64 rsvd_52_16 : 37;
+ u64 wmem : 1;
+ u64 dsz : 2;
+ u64 alg : 4;
+ u64 subdc : 4;
+ u64 addr; /* W1 */
+};
+
+enum nix_cqerrint_e {
+ NIX_CQERRINT_DOOR_ERR = 0,
+ NIX_CQERRINT_WR_FULL = 1,
+ NIX_CQERRINT_CQE_FAULT = 2,
+};
+
+#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \
+ BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+
+enum nix_rqint_e {
+ NIX_RQINT_DROP = 0,
+ NIX_RQINT_RED = 1,
+};
+
+#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED))
+
+enum nix_sqint_e {
+ NIX_SQINT_LMT_ERR = 0,
+ NIX_SQINT_MNQ_ERR = 1,
+ NIX_SQINT_SEND_ERR = 2,
+ NIX_SQINT_SQB_ALLOC_FAIL = 3,
+};
+
+#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \
+ BIT_ULL(NIX_SQINT_MNQ_ERR) | \
+ BIT_ULL(NIX_SQINT_SEND_ERR) | \
+ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+
+#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
new file mode 100644
index 000000000000..bef4c20fe314
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_txrx.h"
+
+#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+
+static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+
+ cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
+ return NULL;
+
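+	/* Advance and wrap the head; CQE counts are powers of two, so
+	 * masking implements the wrap-around.
+	 */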
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ return cqe_hdr;
+}
+
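+/* The Rx SG descriptor packs four 16-bit size fields per 64-bit word,
+ * so on big-endian hosts the fragment index is remapped within each
+ * group of four.
+ */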
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
+}
+
+static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ int seg;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE);
+ }
+ sg->num_segs = 0;
+}
+
+static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe,
+ int budget, int *tx_pkts, int *tx_bytes)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sk_buff *skb = NULL;
+ struct sg_list *sg;
+
+ if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
+ net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
+ pfvf->netdev->name, cq->cint_idx,
+ snd_comp->status);
+
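+	/* 'sqe_id' was stamped into the SQE when it was queued and
+	 * indexes the saved sg list.
+	 */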
+ sg = &sq->sg[snd_comp->sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(!skb))
+ return;
+
+ *tx_bytes += skb->len;
+ (*tx_pkts)++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ napi_consume_skb(skb, budget);
+ sg->skb = (u64)NULL;
+}
+
+static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len)
+{
+ struct page *page;
+ void *va;
+
+ va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+ page = virt_to_page(va);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page), len, pfvf->rbsize);
+
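+	/* The buffer was mapped including headroom, so unmap from
+	 * its original base address.
+	 */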
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+}
+
+static void otx2_set_rxhash(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ struct otx2_rss_info *rss;
+ u32 hash = 0;
+
+ if (!(pfvf->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss = &pfvf->hw.rss_info;
+ if (rss->flowkey_cfg) {
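+		/* Flow-key bits beyond plain IPv4/IPv6 imply L4 fields
+		 * were included in the hash.
+		 */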
+ if (rss->flowkey_cfg &
+ ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe->hdr.flow_tag;
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, int qidx)
+{
+ struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
+ struct nix_rx_parse_s *parse = &cqe->parse;
+
+ if (netif_msg_rx_err(pfvf))
+ netdev_err(pfvf->netdev,
+ "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
+ qidx, parse->errlev, parse->errcode);
+
+ if (parse->errlev == NPC_ERRLVL_RE) {
+ switch (parse->errcode) {
+ case ERRCODE_FCS:
+ case ERRCODE_FCS_RCV:
+ atomic_inc(&stats->rx_fcs_errs);
+ break;
+ case ERRCODE_UNDERSIZE:
+ atomic_inc(&stats->rx_undersize_errs);
+ break;
+ case ERRCODE_OVERSIZE:
+ atomic_inc(&stats->rx_oversize_errs);
+ break;
+ case ERRCODE_OL2_LEN_MISMATCH:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else if (parse->errlev == NPC_ERRLVL_NIX) {
+ switch (parse->errcode) {
+ case ERRCODE_OL3_LEN:
+ case ERRCODE_OL4_LEN:
+ case ERRCODE_IL3_LEN:
+ case ERRCODE_IL4_LEN:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ case ERRCODE_OL4_CSUM:
+ case ERRCODE_IL4_CSUM:
+ atomic_inc(&stats->rx_csum_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else {
+ atomic_inc(&stats->rx_other_errs);
+		/* For now ignore all NPC parser errors and pass
+		 * such packets on to the stack.
+		 */
+ return false;
+ }
+
+	/* If RXALL is enabled, pass packets on to the stack. */
+ if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ return false;
+
+ /* Free buffer back to pool */
+ if (cqe->sg.segs)
+ otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ return true;
+}
+
+static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq,
+ struct nix_cqe_rx_s *cqe)
+{
+ struct nix_rx_parse_s *parse = &cqe->parse;
+ struct sk_buff *skb = NULL;
+
+ if (unlikely(parse->errlev || parse->errcode)) {
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ return;
+ }
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return;
+
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ cq->pool_ptrs++;
+
+ otx2_set_rxhash(pfvf, cqe, skb);
+
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_frags(napi);
+}
+
+static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ s64 bufptr;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
+ !cqe->sg.seg_addr) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ cqe->sg.seg_addr = 0x00;
+ processed_cqe++;
+ }
+
+	/* Free CQEs to HW: the doorbell's upper word selects the CQ,
+	 * the lower word gives the number of CQEs processed.
+	 */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (unlikely(!cq->pool_ptrs))
+ return 0;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ if (unlikely(bufptr <= 0)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ break;
+ }
+ otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+
+ return processed_cqe;
+}
+
+static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ if (unlikely(!cqe)) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (likely(tx_pkts)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) &&
+ netif_carrier_ok(pfvf->netdev))
+ netif_tx_wake_queue(txq);
+ }
+ return 0;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget)
+{
+ struct otx2_cq_poll *cq_poll;
+ int workdone = 0, cq_idx, i;
+ struct otx2_cq_queue *cq;
+ struct otx2_qset *qset;
+ struct otx2_nic *pfvf;
+
+ cq_poll = container_of(napi, struct otx2_cq_poll, napi);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ qset = &pfvf->qset;
+
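+	/* Service this CINT's CQs highest index first; with the
+	 * CQ_RX/CQ_TX id layout this reaps TX completions before
+	 * polling for new RX work.
+	 */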
+ for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ cq_idx = cq_poll->cq_ids[i];
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
+ continue;
+ cq = &qset->cq[cq_idx];
+ if (cq->cq_type == CQ_RX) {
+			/* If the RQ refill workqueue task is still running,
+			 * skip NAPI processing for this queue.
+			 */
+ if (cq->refill_task_sched)
+ continue;
+ workdone += otx2_rx_napi_handler(pfvf, napi,
+ cq, budget);
+ } else {
+ workdone += otx2_tx_napi_handler(pfvf, cq, budget);
+ }
+ }
+
+ /* Clear the IRQ */
+ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
+
+ if (workdone < budget && napi_complete_done(napi, workdone)) {
+ /* If interface is going down, don't re-enable IRQ */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+ /* Re-enable interrupts */
+ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
+ return workdone;
+}
+
+static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
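+	/* otx2_lmt_flush() returns zero if the LMTST transfer was
+	 * aborted, so retry until HW accepts the SQE.
+	 */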
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+#define MAX_SEGS_PER_SG 3
+/* Add SQE scatter/gather subdescriptor structure */
+static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdc always starts at a 16-byte boundary,
+			 * so reserve IOVA space in 16-byte chunks: the same
+			 * room is used whether this SG holds two or three
+			 * segments.
+			 */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+/* Add SQE extended header subdescriptor */
+static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int *offset)
+{
+ struct nix_sqe_ext_s *ext;
+
+ ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
+ ext->subdc = NIX_SUBDC_EXT;
+ if (skb_shinfo(skb)->gso_size) {
+ ext->lso = 1;
+ ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_mps = skb_shinfo(skb)->gso_size;
+
+ /* Only TSOv4 and TSOv6 GSO offloads are supported */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ext->lso_format = pfvf->hw.lso_tsov4_idx;
+
+			/* HW adds the payload size to 'ip_hdr->tot_len' while
+			 * sending each TSO segment, hence set the length in
+			 * the packet's IP header to cover only the headers.
+			 */
+ ip_hdr(skb)->tot_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ } else {
+ ext->lso_format = pfvf->hw.lso_tsov6_idx;
+ ipv6_hdr(skb)->payload_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ }
+ }
+ *offset += sizeof(*ext);
+}
+
+/* Add SQE header subdescriptor structure */
+static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct nix_sqe_hdr_s *sqe_hdr,
+ struct sk_buff *skb, u16 qidx)
+{
+ int proto = 0;
+
+	/* If this SQE was framed before, these constant fields are
+	 * already set and need not be written again.
+	 */
+ if (!sqe_hdr->total) {
+ /* Don't free Tx buffers to Aura */
+ sqe_hdr->df = 1;
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sq = qidx;
+ }
+ sqe_hdr->total = skb->len;
+	/* Set SQE identifier, used later to locate and free the SKB */
+ sqe_hdr->sqe_id = sq->head;
+
+ /* Offload TCP/UDP checksum to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sqe_hdr->ol3ptr = skb_network_offset(skb);
+ sqe_hdr->ol4ptr = skb_transport_offset(skb);
+ /* get vlan protocol Ethertype */
+ if (eth_type_vlan(skb->protocol))
+ skb->protocol = vlan_get_protocol(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ proto = ip_hdr(skb)->protocol;
+ /* In case of TSO, HW needs this to be explicitly set.
+ * So set this always, instead of adding a check.
+ */
+ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+
+ if (proto == IPPROTO_TCP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ else if (proto == IPPROTO_UDP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
+ }
+}
+
+static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int sqe, int hdr_len)
+{
+ int num_segs = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_list *sg = &sq->sg[sqe];
+ u64 dma_addr;
+ int seg, len;
+
+ sg->num_segs = 0;
+
+ /* Get payload length at skb->data */
+ len = skb_headlen(skb) - hdr_len;
+
+ for (seg = 0; seg < num_segs; seg++) {
+		/* Skip skb->data if it carries no payload */
+ if (!seg && !len)
+ continue;
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ goto unmap;
+
+ /* Save DMA mapping info for later unmapping */
+ sg->dma_addr[sg->num_segs] = dma_addr;
+ sg->size[sg->num_segs] = len;
+ sg->num_segs++;
+ }
+ return 0;
+unmap:
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ return -EINVAL;
+}
+
+static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int seg,
+ u64 seg_addr, int hdr_len, int sqe)
+{
+ struct sg_list *sg = &sq->sg[sqe];
+ const skb_frag_t *frag;
+ int offset;
+
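+	/* A negative seg means the data is still within the skb
+	 * linear region, mapped at sg->dma_addr[0].
+	 */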
+ if (seg < 0)
+ return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
+
+ frag = &skb_shinfo(skb)->frags[seg];
+ offset = seg_addr - (u64)skb_frag_address(frag);
+ if (skb_headlen(skb) - hdr_len)
+ seg++;
+ return sg->dma_addr[seg] + offset;
+}
+
+static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
+ struct sg_list *list, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u16 *sg_lens = NULL;
+ u64 *iova = NULL;
+ int seg;
+
+ /* Add SG descriptors with buffer addresses */
+ for (seg = 0; seg < list->num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdc always starts at a 16-byte boundary,
+			 * so reserve IOVA space in 16-byte chunks: the same
+			 * room is used whether this SG holds two or three
+			 * segments.
+			 */
+ if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
+ *iova++ = list->dma_addr[seg];
+ sg->segs++;
+ }
+}
+
+static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_data, seg_len, pkt_len, offset;
+ struct nix_sqe_hdr_s *sqe_hdr;
+ int first_sqe = sq->head;
+ struct sg_list list;
+ struct tso_t tso;
+
+	/* Map the SKB's fragments to DMA once up front, to avoid
+	 * remapping them for every TSO segment's packet.
+	 */
+ if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
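+	/* Account the full skb length for BQL before segmenting */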
+ netdev_tx_sent_queue(txq, skb->len);
+
+ tso_start(skb, &tso);
+ tcp_data = skb->len - hdr_len;
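+	/* Build and flush one SQE per TSO segment, reusing the
+	 * DMA mappings saved against the first SQE.
+	 */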
+ while (tcp_data > 0) {
+ char *hdr;
+
+ seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
+ tcp_data -= seg_len;
+
+ /* Set SQE's SEND_HDR */
+ memset(sq->sqe_base, 0, sq->sqe_size);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add TSO segment's pkt header */
+ hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
+ tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
+ list.dma_addr[0] =
+ sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
+ list.size[0] = hdr_len;
+ list.num_segs = 1;
+
+ /* Add TSO segment's payload data fragments */
+ pkt_len = hdr_len;
+ while (seg_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, seg_len);
+
+ list.size[list.num_segs] = size;
+ list.dma_addr[list.num_segs] =
+ otx2_tso_frag_dma_addr(sq, skb,
+ tso.next_frag_idx - 1,
+ (u64)tso.data, hdr_len,
+ first_sqe);
+ list.num_segs++;
+ pkt_len += size;
+ seg_len -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ sqe_hdr->total = pkt_len;
+ otx2_sqe_tso_add_sg(sq, &list, &offset);
+
+		/* The DMA mappings and the skb need to be freed only after
+		 * the last TSO segment is transmitted, so set 'PNC' only
+		 * for the last segment. Also point the last segment's
+		 * sqe_id at the first segment's SQE index, where the skb
+		 * address and DMA mappings are saved.
+		 */
+ if (!tcp_data) {
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sqe_id = first_sqe;
+ sq->sg[first_sqe].skb = (u64)skb;
+ } else {
+ sqe_hdr->pnc = 0;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+ }
+}
+
+static bool is_hw_tso_supported(struct otx2_nic *pfvf,
+ struct sk_buff *skb)
+{
+ int payload_len, last_seg_size;
+
+ if (!pfvf->hw.hw_tso)
+ return false;
+
+	/* Due to a HW issue, when the payload of the last LSO segment
+	 * is shorter than 16 bytes some header fields may not be
+	 * updated correctly, hence don't offload such TSO segments.
+	 */
+ if (!is_96xx_B0(pfvf->pdev))
+ return true;
+
+ payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
+ if (last_seg_size && last_seg_size < 16)
+ return false;
+
+ return true;
+}
+
+static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
+{
+ if (!skb_shinfo(skb)->gso_size)
+ return 1;
+
+ /* HW TSO */
+ if (is_hw_tso_supported(pfvf, skb))
+ return 1;
+
+ /* SW TSO */
+ return skb_shinfo(skb)->gso_segs;
+}
+
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int offset, num_segs, free_sqe;
+ struct nix_sqe_hdr_s *sqe_hdr;
+
+	/* Check if there is room for a new SQE: subtracting the Aura's
+	 * count from the number of SQBs in the SQ's pool gives the free
+	 * SQB count, times SQEs per SQB.
+	 */
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+
+ if (free_sqe < sq->sqe_thresh ||
+ free_sqe < otx2_get_sqe_count(pfvf, skb))
+ return false;
+
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+
+ /* If SKB doesn't fit in a single SQE, linearize it.
+ * TODO: Consider adding JUMP descriptor instead.
+ */
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+ }
+
+ if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ return true;
+ }
+
+	/* Set SQE's SEND_HDR.
+	 * Do not clear the first 64 bits, as they hold constant info.
+	 */
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add extended header if needed */
+ otx2_sqe_add_ext(pfvf, sq, skb, &offset);
+
+ /* Add SG subdesc with data frags */
+ if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
+ return false;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+
+ return true;
+}
+
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ u64 iova, pa;
+
+ while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
+ if (!cqe->sg.subdc)
+ continue;
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+
+ sq = &pfvf->qset.sq[cq->cint_idx];
+
+ while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
new file mode 100644
index 000000000000..4ab32d3adb78
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_TXRX_H
+#define OTX2_TXRX_H
+
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <linux/if_vlan.h>
+
+#define LBK_CHAN_BASE 0x000
+#define SDP_CHAN_BASE 0x700
+#define CGX_CHAN_BASE 0x800
+
+#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN)
+#define OTX2_HEAD_ROOM OTX2_ALIGN
+
+#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
+#define OTX2_MIN_MTU 64
+#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
+
+#define OTX2_MAX_GSO_SEGS 255
+#define OTX2_MAX_FRAGS_IN_SQE 9
+
+/* Rx buffer size should be a multiple of 128 bytes */
+#define RCV_FRAG_LEN1(x) \
+ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Prefer 2048-byte buffers for better last-level cache
+ * utilization or data distribution across regions.
+ */
+#define RCV_FRAG_LEN(x) \
+ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
+
+#define DMA_BUFFER_LEN(x) \
+ ((x) - OTX2_HEAD_ROOM - \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
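+
+/* DMA_BUFFER_LEN() complements the above: given a total fragment
+ * size it returns the bytes left for packet data once headroom and
+ * the skb_shared_info tail are carved out.
+ */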
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is equal to this value.
+ */
+#define CQ_CQE_THRESH_DEFAULT 10
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is nonzero and this much time elapses after that.
+ */
+#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */
+#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */
+
+/* Min number of CQs (of the ones mapped to this CINT)
+ * with valid CQEs.
+ */
+#define CQ_QCOUNT_DEFAULT 1
+
+struct queue_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+struct otx2_rcv_queue {
+ struct queue_stats stats;
+};
+
+struct sg_list {
+ u16 num_segs;
+ u64 skb;
+ u64 size[OTX2_MAX_FRAGS_IN_SQE];
+ u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
+};
+
+struct otx2_snd_queue {
+ u8 aura_id;
+ u16 head;
+ u16 sqe_size;
+ u32 sqe_cnt;
+ u16 num_sqbs;
+ u16 sqe_thresh;
+ u8 sqe_per_sqb;
+ u64 io_addr;
+ u64 *aura_fc_addr;
+ u64 *lmt_addr;
+ void *sqe_base;
+ struct qmem *sqe;
+ struct qmem *tso_hdrs;
+ struct sg_list *sg;
+ struct queue_stats stats;
+ u16 sqb_count;
+ u64 *sqb_ptrs;
+} ____cacheline_aligned_in_smp;
+
+enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQS_PER_CINT = 2, /* RQ + SQ */
+};
+
+struct otx2_cq_poll {
+ void *dev;
+#define CINT_INVALID_CQ 255
+ u8 cint_idx;
+ u8 cq_ids[CQS_PER_CINT];
+ struct napi_struct napi;
+};
+
+struct otx2_pool {
+ struct qmem *stack;
+ struct qmem *fc_addr;
+ u8 rbpage_order;
+ u16 rbsize;
+ u32 page_offset;
+ u16 pageref;
+ struct page *page;
+};
+
+struct otx2_cq_queue {
+ u8 cq_idx;
+ u8 cq_type;
+ u8 cint_idx; /* CQ interrupt id */
+ u8 refill_task_sched;
+ u16 cqe_size;
+ u16 pool_ptrs;
+ u32 cqe_cnt;
+ u32 cq_head;
+ void *cqe_base;
+ struct qmem *cqe;
+ struct otx2_pool *rbpool;
+} ____cacheline_aligned_in_smp;
+
+struct otx2_qset {
+ u32 rqe_cnt;
+ u32 sqe_cnt; /* Keep these two at top */
+#define OTX2_MAX_CQ_CNT 64
+ u16 cq_cnt;
+ u16 xqe_size;
+ struct otx2_pool *pool;
+ struct otx2_cq_poll *napi;
+ struct otx2_cq_queue *cq;
+ struct otx2_snd_queue *sq;
+ struct otx2_rcv_queue *rq;
+};
+
+/* Translate IOVA to physical address */
+static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (likely(iommu_domain))
+ return iommu_iova_to_phys(iommu_domain, dma_addr);
+ return dma_addr;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget);
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx);
+#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3fb7ee3d4d13..7a0d785b826c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -742,7 +742,7 @@ txq_reclaim_end:
return released;
}
-static void pxa168_eth_tx_timeout(struct net_device *dev)
+static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1344,15 +1344,6 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
return 0;
}
-static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- if (dev->phydev)
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-
- return -EOPNOTSUPP;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
@@ -1387,7 +1378,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
.ndo_set_mac_address = pxa168_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_change_mtu = pxa168_eth_change_mtu,
.ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 095f6c71b4fa..97f270d30cce 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2884,7 +2884,7 @@ static void skge_tx_clean(struct net_device *dev)
skge->tx_ring.to_clean = e;
}
-static void skge_tx_timeout(struct net_device *dev)
+static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct skge_port *skge = netdev_priv(dev);
@@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&hw->phy_lock);
tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 5f56ee83e3b1..ebfd0ceac884 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2358,7 +2358,7 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* Transmit timeout is only called if we are running, carrier is up
* and tx queue is full (stopped).
*/
-static void sky2_tx_timeout(struct net_device *dev)
+static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 527ad2aadcca..8c6cfd15481c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2081,7 +2081,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
-static void mtk_tx_timeout(struct net_device *dev)
+static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index eaf08f7ad128..64ed725aec28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
crdump_enable_crspace_access(dev, cr_space);
/* Get the available snapshot ID for the dumps */
- id = devlink_region_shapshot_id_get(devlink);
+ id = devlink_region_snapshot_id_get(devlink);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7af75b63245f..43dcbd8214c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1363,24 +1363,18 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
}
}
-static void mlx4_en_tx_timeout(struct net_device *dev)
+static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int i;
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
-
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
- continue;
- en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, tx_ring->qpn, tx_ring->sp_cqn,
- tx_ring->cons, tx_ring->prod);
- }
+ en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
+ txqueue, tx_ring->qpn, tx_ring->sp_cqn,
+ tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
en_dbg(DRV, priv, "Scheduling watchdog\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a6f390fdb971..d3e06cec8317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o
+ ecpf.o rdma.o eswitch_offloads_chains.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 549f962cd86e..42198e64a7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
+static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 9c8427698238..220ef9f06f84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -135,7 +135,7 @@ struct page_pool;
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -892,6 +892,8 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
+ mlx5e_stats_grp_t *stats_grps;
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -964,7 +966,6 @@ struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1175,11 +1176,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu);
+ u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index d48292ccda29..0416f7712109 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -21,6 +21,7 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 475b6bd5d29b..62fc8a128a8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -35,7 +35,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
*/
dma_info->addr = xdp_umem_get_dma(umem, handle);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 778dab1af8fc..f260dd96873b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct tx_sync_info {
u64 rcd_sn;
- s32 sync_len;
+ u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
+ bool ends_before;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto out;
}
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- ret = tls_record_is_start_marker(record) ?
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ /* There are the following cases:
+ * 1. packet ends before start marker: bypass offload.
+ * 2. packet starts before start marker and ends after it: drop,
+ * not supported, breaks contract with kernel.
+ * 3. packet ends before tls record info starts: drop,
+ * this packet was already acknowledged and its record info
+ * was released.
+ */
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ } else if (ends_before) {
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u8 num_wqebbs;
int i = 0;
- ret = tx_sync_info_get(priv_tx, seq, &info);
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto err_out;
}
- if (unlikely(info.sync_len < 0)) {
- if (likely(datalen <= -info.sync_len))
- return MLX5E_KTLS_SYNC_DONE;
-
- stats->tls_drop_bypass_req++;
- goto err_out;
- }
-
stats->tls_ooo++;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ switch (ret) {
+ case MLX5E_KTLS_SYNC_DONE:
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ break;
+ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ if (likely(!skb->decrypted))
+ goto out;
+ WARN_ON_ONCE(1);
+ /* fall-through */
+ default: /* MLX5E_KTLS_SYNC_FAIL */
goto err_out;
- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
- goto out;
+ }
}
priv_tx->expected_seq = seq + datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c6776f308d5e..d674cb679895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -218,13 +218,9 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- int i, num_stats = 0;
-
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
- return num_stats;
+ return mlx5e_stats_total_num(priv);
case ETH_SS_PRIV_FLAGS:
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
@@ -242,14 +238,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return mlx5e_ethtool_get_sset_count(priv, sset);
}
-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
-{
- int i, idx = 0;
-
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
-}
-
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
{
int i;
@@ -268,7 +256,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
- mlx5e_fill_stats_strings(priv, data);
+ mlx5e_stats_fill_strings(priv, data);
break;
}
}
@@ -283,14 +271,13 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- int i, idx = 0;
+ int idx = 0;
mutex_lock(&priv->state_lock);
- mlx5e_update_stats(priv);
+ mlx5e_stats_update(priv);
mutex_unlock(&priv->state_lock);
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
+ mlx5e_stats_fill(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index acd946f2ddbe..3bc2ac3d53fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
- ft = mlx5_create_auto_grouped_flow_table(ns, prio,
- table_size,
- MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+
+ ft_attr.prio = prio;
+ ft_attr.max_fte = table_size;
+ ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return (void *)ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4997b8a51994..454d3459bd8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -159,23 +159,14 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats)
- mlx5e_stats_grps[i].update_stats(priv);
-}
-
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats_mask &
+ for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
+ if (mlx5e_nic_stats_grps[i]->update_stats_mask &
MLX5E_NDO_UPDATE_STATS)
- mlx5e_stats_grps[i].update_stats(priv);
+ mlx5e_nic_stats_grps[i]->update_stats(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
@@ -4325,7 +4316,7 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -4739,17 +4730,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu)
+ u16 mtu)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
- params->num_channels = max_channels;
+ params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
+ priv->max_nch);
params->num_tc = 1;
/* SQ */
@@ -4876,6 +4869,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
@@ -4986,8 +4981,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
return err;
- mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
+ netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -5149,6 +5144,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5169,7 +5165,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5193,6 +5189,8 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
+ .stats_grps = mlx5e_nic_stats_grps,
+ .stats_grps_num = mlx5e_nic_stats_grps_num,
};
/* mlx5e generic netdev management API (move to en_common.c) */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f175cb24bb67..7b48ccacebe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,6 +41,7 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -116,24 +117,71 @@ static const struct counter_desc vport_rep_stats_desc[] = {
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
-static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
- int i, j;
+ return NUM_VPORT_REP_SW_COUNTERS;
+}
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- sw_rep_stats_desc[i].format);
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- vport_rep_stats_desc[j].format);
- break;
- }
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ sw_rep_stats_desc[i].format);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct rtnl_link_stats64 stats64 = {};
+
+ memset(s, 0, sizeof(*s));
+ mlx5e_fold_sw_stats64(priv, &stats64);
+
+ s->rx_packets = stats64.rx_packets;
+ s->rx_bytes = stats64.rx_bytes;
+ s->tx_packets = stats64.tx_packets;
+ s->tx_bytes = stats64.tx_bytes;
+ s->tx_queue_dropped = stats64.tx_dropped;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
+{
+ return NUM_VPORT_REP_HW_COUNTERS;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ return idx;
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ vport_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -156,64 +204,33 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
-static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct rtnl_link_stats64 *vport_stats;
-
- mlx5e_grp_802_3_update_stats(priv);
-
- vport_stats = &priv->stats.vf_vport;
-
- vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
- vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
- vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
- vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
-}
-
-static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
{
- struct mlx5e_sw_stats *s = &priv->stats.sw;
- struct rtnl_link_stats64 stats64 = {};
-
- memset(s, 0, sizeof(*s));
- mlx5e_fold_sw_stats64(priv, &stats64);
+ struct mlx5e_priv *priv = netdev_priv(dev);
- s->rx_packets = stats64.rx_packets;
- s->rx_bytes = stats64.rx_bytes;
- s->tx_packets = stats64.tx_packets;
- s->tx_bytes = stats64.tx_bytes;
- s->tx_queue_dropped = stats64.tx_dropped;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ mlx5e_stats_fill_strings(priv, data);
+ break;
+ }
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j;
-
- if (!data)
- return;
-
- mutex_lock(&priv->state_lock);
- mlx5e_rep_update_sw_counters(priv);
- priv->profile->update_stats(priv);
- mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
-
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
- vport_rep_stats_desc, j);
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
+ return mlx5e_stats_total_num(priv);
default:
return -EOPNOTSUPP;
}
@@ -1247,8 +1264,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- struct flow_cls_offload *f = type_data;
- struct flow_cls_offload cls_flower;
+ struct flow_cls_offload tmp, *f = type_data;
struct mlx5e_priv *priv = cb_priv;
struct mlx5_eswitch *esw;
unsigned long flags;
@@ -1261,16 +1277,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ memcpy(&tmp, f, sizeof(*f));
+
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.chain_index)
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
* reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
*/
- memcpy(&cls_flower, f, sizeof(*f));
- cls_flower.common.chain_index = FDB_FT_CHAIN;
- err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
- memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ return -EOPNOTSUPP;
+ if (tmp.common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
return err;
default:
return -EOPNOTSUPP;
@@ -1660,10 +1690,65 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_close_drop_rq(&priv->drop_rq);
}
+static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ int err = mlx5e_init_rep_rx(priv);
+
+ if (err)
+ return err;
+
+ mlx5e_create_q_counters(priv);
+ return 0;
+}
+
+static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_q_counters(priv);
+ mlx5e_cleanup_rep_rx(priv);
+}
+
+static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = rpriv->netdev;
+ priv = netdev_priv(netdev);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_init(&uplink_priv->unready_flows_lock);
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ if (err)
+ return err;
+
+ mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+ uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+ err = register_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ if (err) {
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+ goto tc_esw_cleanup;
+ }
+
+ return 0;
+
+tc_esw_cleanup:
+ mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+ return err;
+}
+
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv;
int err;
err = mlx5e_create_tises(priv);
@@ -1673,52 +1758,41 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- uplink_priv = &rpriv->uplink_priv;
-
- mutex_init(&uplink_priv->unready_flows_lock);
- INIT_LIST_HEAD(&uplink_priv->unready_flows);
-
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto destroy_tises;
-
- mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
-
- /* init indirect block notifications */
- INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
- uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
- err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
- if (err) {
- mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
- goto tc_esw_cleanup;
- }
}
return 0;
-tc_esw_cleanup:
- mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+
+ /* clean indirect TC block notifications */
+ unregister_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ mlx5e_rep_indr_clean_block_privs(rpriv);
+
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+}
+
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_destroy_tises(priv);
- if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- /* clean indirect TC block notifications */
- unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
- mlx5e_rep_indr_clean_block_privs(rpriv);
-
- /* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
- mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
- }
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1787,6 +1861,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1795,7 +1870,44 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
+}
+
+static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
+static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
+
+/* The stats groups order is opposite to the update_stats() order calls */
+static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw_rep),
+ &MLX5E_STATS_GRP(vport_rep),
+};
+
+static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_rep_stats_grps);
+}
+
+/* The stats groups order is opposite to the update_stats() order calls */
+static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
static const struct mlx5e_profile mlx5e_rep_profile = {
@@ -1807,29 +1919,33 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_rep_stats_grps,
+ .stats_grps_num = mlx5e_rep_stats_grps_num,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
- .init_rx = mlx5e_init_rep_rx,
- .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_rx = mlx5e_init_ul_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_ul_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_uplink_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_ul_rep_stats_grps,
+ .stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 31f83c8adcc9..3f756d51435f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -73,6 +73,7 @@ struct mlx5_rep_uplink_priv {
*/
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 9f09253f9f46..30b216d9284c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -35,6 +35,58 @@
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+static unsigned int stats_grps_num(struct mlx5e_priv *priv)
+{
+	return priv->profile->stats_grps_num ?
+	       priv->profile->stats_grps_num(priv) : 0;
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ unsigned int total = 0;
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ total += stats_grps[i]->get_num_stats(priv);
+
+ return total;
+}
+
+void mlx5e_stats_update(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = num_stats_grps - 1; i >= 0; i--)
+ if (stats_grps[i]->update_stats)
+ stats_grps[i]->update_stats(priv);
+}
+
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_stats(priv, data, idx);
+}
+
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i, idx = 0;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_strings(priv, data, idx);
+}
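/* These four walkers are the only entry points the ethtool callbacks
 * need: a profile publishes its group array via ->stats_grps and its
 * count via ->stats_grps_num; counting, strings, values, and updates
 * all dispatch through the per-group ops.
 */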
+
+/* Concrete NIC Stats */
+
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
@@ -146,12 +198,12 @@ static const struct counter_desc sw_stats_desc[] = {
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
return NUM_SW_COUNTERS;
}
-static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
int i;
@@ -160,7 +212,7 @@ static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
int i;
@@ -169,7 +221,7 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
@@ -297,6 +349,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
s->tx_cqes += sq_stats->cqes;
+
+ /* barrier() works around a GCC miscompilation of this
+ * accumulation loop: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657
+ */
+ barrier();
}
}
}
@@ -312,7 +367,7 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
-static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
@@ -325,7 +380,7 @@ static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
@@ -340,7 +395,7 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
@@ -353,7 +408,7 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
@@ -388,14 +443,13 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
-static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
-static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
int i;
@@ -409,8 +463,7 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
int i;
@@ -424,7 +477,7 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
@@ -487,13 +540,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
return NUM_VPORT_COUNTERS;
}
-static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
int i;
@@ -502,8 +554,7 @@ static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
int i;
@@ -513,7 +564,7 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
@@ -552,13 +603,12 @@ static const struct counter_desc pport_802_3_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
return NUM_PPORT_802_3_COUNTERS;
}
-static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
int i;
@@ -567,8 +617,7 @@ static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
int i;
@@ -581,7 +630,7 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -609,13 +658,12 @@ static const struct counter_desc pport_2863_stats_desc[] = {
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
return NUM_PPORT_2863_COUNTERS;
}
-static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
int i;
@@ -624,8 +672,7 @@ static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
int i;
@@ -635,7 +682,7 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -670,13 +717,12 @@ static const struct counter_desc pport_2819_stats_desc[] = {
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
return NUM_PPORT_2819_COUNTERS;
}
-static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
int i;
@@ -685,8 +731,7 @@ static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
int i;
@@ -696,7 +741,7 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -734,7 +779,7 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
-static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
@@ -751,8 +796,7 @@ static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -774,7 +818,7 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -800,7 +844,7 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -830,7 +874,7 @@ static const struct counter_desc pport_eth_ext_stats_desc[] = {
#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
-static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
return NUM_PPORT_ETH_EXT_COUNTERS;
@@ -838,8 +882,7 @@ static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
int i;
@@ -850,8 +893,7 @@ static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
int i;
@@ -863,7 +905,7 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -904,7 +946,7 @@ static const struct counter_desc pcie_perf_stall_stats_desc[] = {
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
-static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
int num_stats = 0;
@@ -920,8 +962,7 @@ static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
int i;
@@ -942,8 +983,7 @@ static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
int i;
@@ -967,7 +1007,7 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1015,8 +1055,7 @@ static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
- u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i, prio;
@@ -1036,8 +1075,7 @@ static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *pri
return idx;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
- u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
struct mlx5e_pport_stats *pport = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1112,13 +1150,13 @@ static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
}
}
-static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}
-static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
mlx5e_grp_per_tc_prio_update_stats(priv);
mlx5e_grp_per_tc_congest_prio_update_stats(priv);
@@ -1130,6 +1168,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *pr
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
@@ -1292,29 +1331,27 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
-static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
return mlx5e_grp_per_prio_traffic_get_num_stats() +
mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
-static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
return idx;
}
-static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
return idx;
}
-static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1349,13 +1386,12 @@ static const struct counter_desc mlx5e_pme_error_desc[] = {
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
-static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
-static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
int i;
@@ -1368,8 +1404,7 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
struct mlx5_pme_stats pme_stats;
int i;
@@ -1387,45 +1422,46 @@ static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
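/* Empty stubs like the one above (also tls and channels below) are
 * required because MLX5E_DEFINE_STATS_GRP wires .update_stats
 * unconditionally; a group with no hardware refresh still needs the
 * symbol to exist.
 */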
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
{
return mlx5e_ipsec_get_count(priv);
}
-static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
{
return idx + mlx5e_ipsec_get_strings(priv,
data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
{
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
-static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
{
mlx5e_ipsec_update_stats(priv);
}
-static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
}
-static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
return idx + mlx5e_tls_get_stats(priv, data + idx);
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1559,7 +1595,7 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
-static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
int max_nch = priv->max_nch;
@@ -1572,8 +1608,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
(NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
-static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1615,8 +1650,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1664,104 +1698,46 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
+
+MLX5E_DEFINE_STATS_GRP(sw, 0);
+MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
+MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(2863, 0);
+MLX5E_DEFINE_STATS_GRP(2819, 0);
+MLX5E_DEFINE_STATS_GRP(phy, 0);
+MLX5E_DEFINE_STATS_GRP(pcie, 0);
+MLX5E_DEFINE_STATS_GRP(per_prio, 0);
+MLX5E_DEFINE_STATS_GRP(pme, 0);
+MLX5E_DEFINE_STATS_GRP(channels, 0);
+MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
+MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
+static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
+static MLX5E_DEFINE_STATS_GRP(tls, 0);
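/* ipsec and tls stay static: only the NIC group array below references
 * them, while the rest are extern'ed from en_stats.h for reuse by the
 * representor profiles in en_rep.c.
 */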
+
/* The stats groups order is opposite to the update_stats() call order */
-const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
- {
- .get_num_stats = mlx5e_grp_sw_get_num_stats,
- .fill_strings = mlx5e_grp_sw_fill_strings,
- .fill_stats = mlx5e_grp_sw_fill_stats,
- .update_stats = mlx5e_grp_sw_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_q_get_num_stats,
- .fill_strings = mlx5e_grp_q_fill_strings,
- .fill_stats = mlx5e_grp_q_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_q_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
- .fill_strings = mlx5e_grp_vnic_env_fill_strings,
- .fill_stats = mlx5e_grp_vnic_env_fill_stats,
- .update_stats = mlx5e_grp_vnic_env_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vport_get_num_stats,
- .fill_strings = mlx5e_grp_vport_fill_strings,
- .fill_stats = mlx5e_grp_vport_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_vport_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_802_3_get_num_stats,
- .fill_strings = mlx5e_grp_802_3_fill_strings,
- .fill_stats = mlx5e_grp_802_3_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_802_3_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2863_get_num_stats,
- .fill_strings = mlx5e_grp_2863_fill_strings,
- .fill_stats = mlx5e_grp_2863_fill_stats,
- .update_stats = mlx5e_grp_2863_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2819_get_num_stats,
- .fill_strings = mlx5e_grp_2819_fill_strings,
- .fill_stats = mlx5e_grp_2819_fill_stats,
- .update_stats = mlx5e_grp_2819_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_phy_get_num_stats,
- .fill_strings = mlx5e_grp_phy_fill_strings,
- .fill_stats = mlx5e_grp_phy_fill_stats,
- .update_stats = mlx5e_grp_phy_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
- .fill_strings = mlx5e_grp_eth_ext_fill_strings,
- .fill_stats = mlx5e_grp_eth_ext_fill_stats,
- .update_stats = mlx5e_grp_eth_ext_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pcie_get_num_stats,
- .fill_strings = mlx5e_grp_pcie_fill_strings,
- .fill_stats = mlx5e_grp_pcie_fill_stats,
- .update_stats = mlx5e_grp_pcie_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_fill_stats,
- .update_stats = mlx5e_grp_per_prio_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pme_get_num_stats,
- .fill_strings = mlx5e_grp_pme_fill_strings,
- .fill_stats = mlx5e_grp_pme_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
- .fill_strings = mlx5e_grp_ipsec_fill_strings,
- .fill_stats = mlx5e_grp_ipsec_fill_stats,
- .update_stats = mlx5e_grp_ipsec_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_tls_get_num_stats,
- .fill_strings = mlx5e_grp_tls_fill_strings,
- .fill_stats = mlx5e_grp_tls_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_channels_get_num_stats,
- .fill_strings = mlx5e_grp_channels_fill_strings,
- .fill_stats = mlx5e_grp_channels_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
- .fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
- .fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
- .update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
- },
+mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(ipsec),
+ &MLX5E_STATS_GRP(tls),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
};
-const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_nic_stats_grps);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 869f3502f631..092b39ffa32a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__
@@ -55,6 +56,56 @@ struct counter_desc {
size_t offset; /* Byte offset */
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ u16 update_stats_mask;
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
+};
+
+typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
+
+#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
+
+#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
+ void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+
+#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
+
+#define MLX5E_DECLARE_STATS_GRP(grp) \
+ const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
+
+#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
+MLX5E_DECLARE_STATS_GRP(grp) = { \
+ .get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
+ .fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
+ .fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
+ .update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
+ .update_stats_mask = mask, \
+}
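/* For illustration, MLX5E_DEFINE_STATS_GRP(sw, 0) expands to roughly:
 *
 *   const struct mlx5e_stats_grp mlx5e_stats_grp_sw = {
 *       .get_num_stats     = mlx5e_stats_grp_sw_num_stats,
 *       .fill_stats        = mlx5e_stats_grp_sw_fill_stats,
 *       .fill_strings      = mlx5e_stats_grp_sw_fill_strings,
 *       .update_stats      = mlx5e_stats_grp_sw_update_stats,
 *       .update_stats_mask = 0,
 *   };
 */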
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_update(struct mlx5e_priv *priv);
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+
+/* Concrete NIC Stats */
+
struct mlx5e_sw_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -322,22 +373,22 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-enum {
- MLX5E_NDO_UPDATE_STATS = BIT(0x1),
-};
-
-struct mlx5e_priv;
-struct mlx5e_stats_grp {
- u16 update_stats_mask;
- int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
- void (*update_stats)(struct mlx5e_priv *priv);
-};
-
-extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
-extern const int mlx5e_num_stats_grps;
+extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
+extern MLX5E_DECLARE_STATS_GRP(sw);
+extern MLX5E_DECLARE_STATS_GRP(qcnt);
+extern MLX5E_DECLARE_STATS_GRP(vnic_env);
+extern MLX5E_DECLARE_STATS_GRP(vport);
+extern MLX5E_DECLARE_STATS_GRP(802_3);
+extern MLX5E_DECLARE_STATS_GRP(2863);
+extern MLX5E_DECLARE_STATS_GRP(2819);
+extern MLX5E_DECLARE_STATS_GRP(phy);
+extern MLX5E_DECLARE_STATS_GRP(eth_ext);
+extern MLX5E_DECLARE_STATS_GRP(pcie);
+extern MLX5E_DECLARE_STATS_GRP(per_prio);
+extern MLX5E_DECLARE_STATS_GRP(pme);
+extern MLX5E_DECLARE_STATS_GRP(channels);
+extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 024e1cddfd0e..74091f72c9a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,6 +51,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -960,7 +961,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- int tc_grp_size, tc_tbl_size;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int tc_grp_size, tc_tbl_size, tc_num_grps;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +972,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+ tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
+ ft_attr.prio = MLX5E_TC_PRIO;
+ ft_attr.max_fte = tc_tbl_size;
+ ft_attr.level = MLX5E_TC_FT_LEVEL;
+ ft_attr.autogroup.max_num_groups = tc_num_grps;
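/* All table parameters now travel in one mlx5_flow_table_attr, so
 * future knobs can be added without changing every caller of
 * mlx5_create_auto_grouped_flow_table() again.
 */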
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- MLX5E_TC_PRIO,
- tc_tbl_size,
- MLX5E_TC_TABLE_NUM_GROUPS,
- MLX5E_TC_FT_LEVEL, 0);
+ &ft_attr);
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
@@ -1080,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1097,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1157,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
bool encap_valid = true;
+ u32 max_prio, max_chain;
int err = 0;
int out_index;
- if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1179,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
+ max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
+ max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
return -EOPNOTSUPP;
@@ -1805,6 +1810,40 @@ static void *get_match_headers_value(u32 flags,
outer_headers);
}
+static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(dev_net(filter_dev),
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (ingress_dev != filter_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't match on the ingress filter port");
+ return -EINVAL;
+ }
+
+ return 0;
+}
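/* The meta key carries the ingress ifindex chosen by tc. Offload is
 * accepted only for an exact-match mask that resolves to the filter
 * device itself, since the rule is programmed against a single port.
 */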
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
@@ -1825,6 +1864,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
+ int err;
match_level = outer_match_level;
@@ -1868,6 +1908,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
spec);
}
+ err = mlx5e_flower_parse_meta(filter_dev, f);
+ if (err)
+ return err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -2842,6 +2886,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -3462,7 +3510,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_GOTO: {
u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
@@ -4036,6 +4084,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
u32 rate_mbps;
int err;
+ vport_num = rpriv->rep->vport;
+ if (vport_num >= MLX5_VPORT_ECPF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+ return -EOPNOTSUPP;
+ }
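/* Rejecting ECPF/uplink vports up front avoids the rate conversion
 * below for ports where E-Switch ingress rate limiting cannot apply.
 */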
+
esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
* First convert to bits/sec and then round to the nearest mbit/secs.
@@ -4044,8 +4099,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
- vport_num = rpriv->rep->vport;
-
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
@@ -4198,7 +4251,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
return err;
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
- if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ err = register_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
+ if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
}
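/* The _dev_net notifier variant scopes events to the netns of
 * priv->netdev; the netdev_net_notifier cookie added to the tc table
 * lets the core migrate the registration if the device changes netns.
 */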
@@ -4220,7 +4276,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
if (tc->netdevice_nb.notifier_call)
- unregister_netdevice_notifier(&tc->netdevice_nb);
+ unregister_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
mutex_destroy(&tc->mod_hdr.lock);
mutex_destroy(&tc->hairpin_tbl_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 580c71cb9dfa..cccea3a8eddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ dev_dbg_ratelimited(eq->dev->device,
+ "Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
@@ -563,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
gather_user_async_events(dev, mask);
}
+static int
+setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
+ struct mlx5_eq_param *param, const char *name)
+{
+ int err;
+
+ eq->irq_nb.notifier_call = mlx5_eq_async_int;
+
+ err = create_async_eq(dev, &eq->core, param);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
+ return err;
+ }
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
+ destroy_async_eq(dev, &eq->core);
+ }
+ return err;
+}
+
+static void cleanup_async_eq(struct mlx5_core_dev *dev,
+ struct mlx5_eq_async *eq, const char *name)
+{
+ int err;
+
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ err = destroy_async_eq(dev, &eq->core);
+ if (err)
+ mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
+ name, err);
+}
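/* With both helpers in place, the cmd, async and pages EQs share one
 * create+enable / disable+destroy path; teardown must run in reverse
 * creation order, which destroy_async_eqs() below preserves.
 */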
+
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -572,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
- table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
-
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
- err = create_async_eq(dev, &table->cmd_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
- goto err0;
- }
- err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+ err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
+ if (err)
goto err1;
- }
+
mlx5_cmd_use_events(dev);
- table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
gather_async_events_mask(dev, param.mask);
- err = create_async_eq(dev, &table->async_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+ err = setup_async_eq(dev, &table->async_eq, &param, "async");
+ if (err)
goto err2;
- }
- err = mlx5_eq_enable(dev, &table->async_eq.core,
- &table->async_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
- goto err3;
- }
- table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
- err = create_async_eq(dev, &table->pages_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
- goto err4;
- }
- err = mlx5_eq_enable(dev, &table->pages_eq.core,
- &table->pages_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
- goto err5;
- }
+ err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
+ if (err)
+ goto err3;
- return err;
+ return 0;
-err5:
- destroy_async_eq(dev, &table->pages_eq.core);
-err4:
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err3:
- destroy_async_eq(dev, &table->async_eq.core);
+ cleanup_async_eq(dev, &table->async_eq, "async");
err2:
mlx5_cmd_use_polling(dev);
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
- destroy_async_eq(dev, &table->cmd_eq.core);
-err0:
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
@@ -650,28 +652,11 @@ err0:
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int err;
-
- mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
- err = destroy_async_eq(dev, &table->pages_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
- err);
-
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
- err = destroy_async_eq(dev, &table->async_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
- err);
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
+ cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_use_polling(dev);
-
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- err = destroy_async_eq(dev, &table->cmd_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
- err);
-
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2c965ad0d744..5acf60b1bbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -277,6 +277,7 @@ enum {
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
}
/* num FTE 2, num FG 2 */
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
- 2, 2, 0, 0);
+ ft_attr.prio = LEGACY_VEPA_PRIO;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
@@ -1928,8 +1931,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int i;
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
}
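/* memset() leaves link_state at 0 (admin state DOWN); restoring
 * MLX5_VPORT_ADMIN_STATE_AUTO keeps re-created VFs operational by
 * default.
 */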
/* Public E-Switch API */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ffcff3ba3701..4472710ccc9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};
-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;
struct mlx5_eswitch_fdb {
union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct {
- struct mlx5_flow_table *fdb;
- u32 num_rules;
- } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
- /* Protects fdb_prio table */
- struct mutex fdb_prio_lock;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+ struct mlx5_esw_chains_priv *esw_chains_priv;
} offloads;
};
u32 flags;
@@ -355,15 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest);
@@ -388,6 +372,11 @@ enum {
MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};
+enum {
+ MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+};
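/* Folding the old vlan_handled bool into a flags word frees room for
 * SLOW_PATH and any future per-rule attributes without growing the
 * struct further.
 */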
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -401,7 +390,6 @@ struct mlx5_esw_flow_attr {
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
- bool vlan_handled;
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
@@ -416,6 +404,7 @@ struct mlx5_esw_flow_attr {
u32 chain;
u16 prio;
u32 dest_chain;
+ u32 flags;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 243a5440867e..979f13bdc203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -47,10 +48,6 @@
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
-
-#define fdb_prio_table(esw, chain, prio, level) \
- (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
-
#define UPLINK_REP_INDEX 0
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
@@ -62,32 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-
-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
-{
- return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
-}
-
-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_CHAIN;
-
- return 0;
-}
-
-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_PRIO;
-
- return 1;
-}
-
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -175,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- if (attr->dest_chain) {
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *ft;
- ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ i++;
+ } else if (attr->dest_chain) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -223,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+ !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -242,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
- if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
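/* Slow-path routing is now an explicit attribute flag rather than the
 * reserved FDB_TC_SLOW_PATH_CHAIN chain number: the destination
 * resolves to the tc end-of-chain table, and FLOW_ACT_IGNORE_FLOW_LEVEL
 * allows forwarding across table levels.
 */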
@@ -262,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -296,6 +275,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
@@ -305,9 +285,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -332,12 +312,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ !!split);
if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
}
@@ -451,7 +432,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->vlan_handled = false;
+ attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(attr, push, pop);
@@ -459,7 +440,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -490,7 +471,7 @@ skip_set_push:
}
out:
if (!err)
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -508,7 +489,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!attr->vlan_handled)
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -582,8 +563,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
dest.vport.num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -824,8 +805,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
@@ -839,8 +820,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
@@ -855,174 +836,6 @@ out:
return err;
}
-#define ESW_OFFLOADS_NUM_GROUPS 4
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via put/get_sz_to_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
- 64 * 1024, 4 * 1024 };
-
-static int
-get_sz_from_pool(struct mlx5_eswitch *esw)
-{
- int sz = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (esw->fdb_table.offloads.fdb_left[i]) {
- --esw->fdb_table.offloads.fdb_left[i];
- sz = ESW_POOLS[i];
- break;
- }
- }
-
- return sz;
-}
-
-static void
-put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (sz >= ESW_POOLS[i]) {
- ++esw->fdb_table.offloads.fdb_left[i];
- break;
- }
- }
-}
-
-static struct mlx5_flow_table *
-create_next_size_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_namespace *ns,
- u16 table_prio,
- int level,
- u32 flags)
-{
- struct mlx5_flow_table *fdb;
- int sz;
-
- sz = get_sz_from_pool(esw);
- if (!sz)
- return ERR_PTR(-ENOSPC);
-
- fdb = mlx5_create_auto_grouped_flow_table(ns,
- table_prio,
- sz,
- ESW_OFFLOADS_NUM_GROUPS,
- level,
- flags);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), table_prio, level, sz);
- put_sz_to_pool(esw, sz);
- }
-
- return fdb;
-}
-
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_table *fdb = NULL;
- struct mlx5_flow_namespace *ns;
- int table_prio, l = 0;
- u32 flags = 0;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return esw->fdb_table.offloads.slow_fdb;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- fdb = fdb_prio_table(esw, chain, prio, level).fdb;
- if (fdb) {
- /* take ref on earlier levels as well */
- while (level >= 0)
- fdb_prio_table(esw, chain, prio, level--).num_rules++;
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
- }
-
- ns = mlx5_get_fdb_sub_ns(dev, chain);
- if (!ns) {
- esw_warn(dev, "Failed to get FDB sub namespace\n");
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- table_prio = prio - 1;
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables
- */
- for (l = 0; l <= level; l++) {
- if (fdb_prio_table(esw, chain, prio, l).fdb) {
- fdb_prio_table(esw, chain, prio, l).num_rules++;
- continue;
- }
-
- fdb = create_next_size_table(esw, ns, table_prio, l, flags);
- if (IS_ERR(fdb)) {
- l--;
- goto err_create_fdb;
- }
-
- fdb_prio_table(esw, chain, prio, l).fdb = fdb;
- fdb_prio_table(esw, chain, prio, l).num_rules = 1;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
-
-err_create_fdb:
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- if (l >= 0)
- esw_put_prio_table(esw, chain, prio, l);
-
- return fdb;
-}
-
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- int l;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- for (l = level; l >= 0; l--) {
- if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
- continue;
-
- put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
- mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
- fdb_prio_table(esw, chain, prio, l).fdb = NULL;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-}
-
-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
-{
- /* If lazy creation isn't supported, deref the fast path tables */
- if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
- esw_put_prio_table(esw, 0, 1, 1);
- esw_put_prio_table(esw, 0, 1, 0);
- }
-}
-
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -1055,16 +868,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
- u32 *flow_group_in, max_flow_counter;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int table_size, ix, err = 0, i;
+ u32 flags = 0, *flow_group_in;
+ int table_size, ix, err = 0;
struct mlx5_flow_group *g;
- u32 flags = 0, fdb_max;
void *match_criteria;
u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
+
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1083,19 +896,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
- fdb_max);
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
- esw->fdb_table.offloads.fdb_left[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
@@ -1118,16 +918,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- /* If lazy creation isn't supported, open the fast path tables now */
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
- esw_get_prio_table(esw, 0, 1, 0);
- esw_get_prio_table(esw, 0, 1, 1);
- } else {
- esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ err = mlx5_esw_chains_create(esw);
+ if (err) {
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ goto fdb_chains_err;
}
/* create send-to-vport group */
@@ -1218,7 +1012,8 @@ miss_err:
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- esw_destroy_offloads_fast_fdb_tables(esw);
+ mlx5_esw_chains_destroy(esw);
+fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@@ -1240,8 +1035,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ mlx5_esw_chains_destroy(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
- esw_destroy_offloads_fast_fdb_tables(esw);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
@@ -1377,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -2111,7 +1906,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
@@ -2220,7 +2014,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
- int err;
+ struct mlx5_vport *vport;
+ int err, i;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2032,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ /* Representor will control the vport link state */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;
@@ -2266,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
new file mode 100644
index 000000000000..c5a446e295aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "eswitch_offloads_chains.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+
+#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
+#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
+#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
+#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
+#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
+#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
+#define fdb_ignore_flow_level_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+
+#define ESW_OFFLOADS_NUM_GROUPS 4
+
+/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), backed by a
+ * 16M virtual memory region (ESW_SIZE) that is duplicated for each flow
+ * table pool. We can allocate up to 16M from each pool, and we track how
+ * much has been used via mlx5_esw_chains_get_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 4 * 1024, };
+
+struct mlx5_esw_chains_priv {
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_end_fdb;
+
+ int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+};
+
+struct fdb_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+
+ struct mlx5_eswitch *esw;
+ struct list_head prios_list;
+};
+
+struct fdb_prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct fdb_prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct fdb_prio_key key;
+
+ int ref;
+
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *next_fdb;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fdb_chain, node),
+ .key_offset = offsetof(struct fdb_chain, chain),
+ .key_len = sizeof_field(struct fdb_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct fdb_prio, node),
+ .key_offset = offsetof(struct fdb_prio, key),
+ .key_len = sizeof_field(struct fdb_prio, key),
+ .automatic_shrinking = true,
+};
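
Both hash tables use fixed-length memcmp-style keys (key_len is taken with sizeof_field), so the composite fdb_prio_key above must contain no padding; three consecutive u32 members guarantee that. A standalone sketch of that assumption, illustrative only and not part of the patch:

#include <stdint.h>
#include <assert.h>

struct fdb_prio_key {
	uint32_t chain;
	uint32_t prio;
	uint32_t level;
};

/* rhashtable hashes and compares sizeof(key) raw bytes; padding bytes
 * would be indeterminate and break lookups. This catches regressions
 * at build time.
 */
static_assert(sizeof(struct fdb_prio_key) == 3 * sizeof(uint32_t),
	      "fdb_prio_key must have no padding");

int main(void) { return 0; }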
+
+bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX - 1;
+
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_chains_get_chain_range(esw) + 1;
+}
+
+u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
+{
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --fdb_pool_left(esw)[found_i];
+ return ESW_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+static void
+mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (sz == ESW_POOLS[i]) {
+ ++fdb_pool_left(esw)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
+}
+
+static void
+mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
+{
+ u32 fdb_max;
+ int i;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
+ fdb_pool_left(esw)[i] =
+ ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
+}
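
The three pool helpers above implement simple bookkeeping over the fixed ESW_POOLS sizes: init seeds each bucket from the firmware's maximum table size, get hands out the smallest size that still fits (or the largest available one for POOL_NEXT_SIZE), and put returns a size to its bucket. A minimal userspace sketch of the same accounting; the log_max_ft_size value of 18 is an assumption for illustration, not a read of the real capability:

#include <stdio.h>

#define ESW_SIZE (16 * 1024 * 1024)
#define POOL_NEXT_SIZE 0

static const unsigned int POOLS[] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				      64 * 1024, 4 * 1024 };
#define NPOOLS (int)(sizeof(POOLS) / sizeof(POOLS[0]))

static int left[NPOOLS];

/* Pools larger than the firmware's maximum table size get no entries. */
static void init_pool(unsigned int fdb_max)
{
	for (int i = NPOOLS - 1; i >= 0; i--)
		left[i] = POOLS[i] <= fdb_max ? ESW_SIZE / POOLS[i] : 0;
}

/* Smallest size strictly larger than 'desired'; for POOL_NEXT_SIZE (0)
 * the scan keeps going and ends up on the largest size still in stock.
 */
static unsigned int get_sz(unsigned int desired)
{
	int found = -1;

	for (int i = NPOOLS - 1; i >= 0; i--) {
		if (left[i] && POOLS[i] > desired) {
			found = i;
			if (desired != POOL_NEXT_SIZE)
				break;
		}
	}
	if (found < 0)
		return 0;
	left[found]--;
	return POOLS[found];
}

static void put_sz(unsigned int sz)
{
	for (int i = NPOOLS - 1; i >= 0; i--)
		if (POOLS[i] == sz)
			left[i]++;
}

int main(void)
{
	init_pool(1 << 18);	/* assumed log_max_ft_size = 18 */
	printf("next:  %u\n", get_sz(POOL_NEXT_SIZE));	/* 65536 */
	printf("fit:   %u\n", get_sz(5000));		/* 65536 */
	put_sz(65536);
	return 0;
}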
+
+static struct mlx5_flow_table *
+mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ int sz;
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+ /* We use tc_slow_fdb(esw) as the table's next_ft until
+ * ignore_flow_level is allowed on FT creation, not just on FTEs.
+ * Callers that need a different miss target should add an explicit
+ * miss rule instead.
+ */
+ ft_attr.next_ft = tc_slow_fdb(esw);
+
+ /* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous prio (FDB_BYPASS_PATH, if it exists).
+ * We always create it as a managed table in order to align with
+ * fs_core logic.
+ */
+ if (!fdb_ignore_flow_level_supported(esw) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. The caller is responsible for creating
+ * these rules, if needed.
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev,
+ "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(fdb), chain, prio, level, sz);
+ mlx5_esw_chains_put_sz_to_pool(esw, sz);
+ return fdb;
+ }
+
+ return fdb;
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb)
+{
+ mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
+ mlx5_destroy_flow_table(fdb);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain = NULL;
+ int err;
+
+ fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
+ if (!fdb_chain)
+ return ERR_PTR(-ENOMEM);
+
+ fdb_chain->esw = esw;
+ fdb_chain->chain = chain;
+ INIT_LIST_HEAD(&fdb_chain->prios_list);
+
+ err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return fdb_chain;
+
+err_insert:
+ kvfree(fdb_chain);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+
+ rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ kvfree(fdb_chain);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain;
+
+ fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
+ chain_params);
+ if (!fdb_chain) {
+ fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return fdb_chain;
+ }
+
+ fdb_chain->ref++;
+
+ return fdb_chain;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+ struct mlx5_flow_table *next_fdb)
+{
+ static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_fdb;
+
+ return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+}
+
+static int
+mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
+ struct mlx5_flow_table *next_fdb)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+ struct fdb_prio *pos;
+ int n = 0, err;
+
+ if (fdb_prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ next_fdb);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_fdb = next_fdb;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
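
mlx5_esw_chains_update_prio_prevs() is a stage-then-commit update: every replacement miss rule is created first, and only after all creations succeed are the old rules deleted and the pointers swapped; on any failure the staged rules are torn down and nothing changes. A generic userspace sketch of the same pattern, with rule creation reduced to a fallible allocation:

#include <stdio.h>
#include <stdlib.h>

struct rule { int target; };

/* Stand-in for mlx5_esw_chains_add_miss_rule(); may fail. */
static struct rule *add_rule(int target)
{
	struct rule *r = malloc(sizeof(*r));

	if (r)
		r->target = target;
	return r;
}

/* Repoint all n rules at new_target: either every rule is updated or
 * none are, exactly as update_prio_prevs() guarantees for miss rules.
 */
static int repoint_all(struct rule **rules, int n, int new_target)
{
	struct rule **staged = calloc(n, sizeof(*staged));
	int i;

	if (!staged)
		return -1;

	for (i = 0; i < n; i++) {		/* phase 1: stage */
		staged[i] = add_rule(new_target);
		if (!staged[i])
			goto revert;
	}

	for (i = 0; i < n; i++) {		/* phase 2: commit */
		free(rules[i]);			/* delete the old rule */
		rules[i] = staged[i];
	}
	free(staged);
	return 0;

revert:
	while (--i >= 0)
		free(staged[i]);
	free(staged);
	return -1;
}

int main(void)
{
	struct rule *rules[2] = { add_rule(1), add_rule(1) };

	printf("repoint: %d\n", repoint_all(rules, 2, 7));
	printf("target:  %d\n", rules[0]->target);	/* 7 */
	free(rules[0]);
	free(rules[1]);
	return 0;
}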
+
+static void
+mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ if (--fdb_chain->ref == 0)
+ mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
+}
+
+static struct fdb_prio *
+mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct fdb_prio *fdb_prio = NULL;
+ struct mlx5_flow_table *next_fdb;
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return ERR_CAST(fdb_chain);
+
+ fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!fdb_prio || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Chain's prio list is sorted by prio and level.
+ * And all levels of some prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ tc_slow_fdb(esw) :
+ tc_end_fdb(esw);
+ list_for_each(pos, &fdb_chain->prios_list) {
+ struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
+ break;
+ }
+ }
+
+ fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ goto err_create;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ fdb->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ fdb->max_fte - 1);
+ miss_group = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_fdb */
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ fdb_prio->miss_group = miss_group;
+ fdb_prio->miss_rule = miss_rule;
+ fdb_prio->next_fdb = next_fdb;
+ fdb_prio->fdb_chain = fdb_chain;
+ fdb_prio->key.chain = chain;
+ fdb_prio->key.prio = prio;
+ fdb_prio->key.level = level;
+ fdb_prio->fdb = fdb;
+
+ err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&fdb_prio->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return fdb_prio;
+
+err_update:
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb);
+err_create:
+err_alloc:
+ kvfree(fdb_prio);
+ kvfree(flow_group_in);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ return ERR_PTR(err);
+}
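
The list comment above is the key invariant for connecting tables: entries are sorted by (prio, level) and each table misses to the level-0 table of the next prio, falling back to the chain's end table. A standalone sketch of just the next_fdb selection, scanning a sorted array instead of a list_head; the table ids are arbitrary labels:

#include <stdio.h>

struct ent { unsigned int prio, level; int ft, next_ft; };

/* Sorted by (prio, level), mirroring a chain's prios_list. */
static const struct ent list[] = {
	{ 3, 0, 30, 50 }, { 3, 1, 31, 50 },
	{ 5, 0, 50, 70 }, { 5, 1, 51, 70 },
	{ 6, 1, 61, 70 }, { 7, 0, 70, 99 },
};
#define N (int)(sizeof(list) / sizeof(list[0]))
#define END_FT 99	/* stands in for the chain's default miss target */

/* Where would a new (prio, level) table miss to? The first strictly
 * larger entry decides: its own table if it is a level 0, otherwise
 * whatever it already misses to.
 */
static int pick_next_ft(unsigned int prio, unsigned int level)
{
	for (int i = 0; i < N; i++) {
		const struct ent *p = &list[i];

		if (prio < p->prio ||
		    (prio == p->prio && level < p->level))
			return p->level == 0 ? p->ft : p->next_ft;
	}
	return END_FT;
}

int main(void)
{
	printf("(4,0) -> %d\n", pick_next_ft(4, 0));	/* 50, i.e. (5,0) */
	printf("(6,0) -> %d\n", pick_next_ft(6, 0));	/* 70, via (6,1) */
	printf("(8,0) -> %d\n", pick_next_ft(8, 0));	/* 99, end table */
	return 0;
}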
+
+static void
+mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
+ struct fdb_prio *fdb_prio)
+{
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+
+ WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
+ fdb_prio->next_fdb));
+
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ mlx5_del_flow_rules(fdb_prio->miss_rule);
+ mlx5_destroy_flow_group(fdb_prio->miss_group);
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ kvfree(fdb_prio);
+}
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
+ chain != mlx5_esw_chains_get_ft_chain(esw)) ||
+ prio > mlx5_esw_chains_get_prio_range(esw) ||
+ level > mlx5_esw_chains_get_level_range(esw))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ fdb_prio = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio) {
+ fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
+ prio, level);
+ if (IS_ERR(fdb_prio))
+ goto err_create_prio;
+ }
+
+ ++fdb_prio->ref;
+ mutex_unlock(&esw_chains_lock(esw));
+
+ return fdb_prio->fdb;
+
+err_create_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, l);
+ return ERR_CAST(fdb_prio);
+}
+
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio)
+ goto err_get_prio;
+
+ if (--fdb_prio->ref == 0)
+ mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
+ mutex_unlock(&esw_chains_lock(esw));
+
+ while (level-- > 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
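
Note the symmetry between the two entry points: mlx5_esw_chains_get_table() references every earlier level before taking its own, and mlx5_esw_chains_put_table() drops its own reference and then releases the earlier levels. The net invariant is that holding level N pins levels 0..N-1 of the same (chain, prio). A toy model of that invariant with linear counters only; the kernel code reaches the same property through recursion, so the raw counts differ:

#include <stdio.h>

#define MAX_LEVELS 3
static int ref[MAX_LEVELS];

static void get_table(int level)
{
	for (int l = 0; l <= level; l++)	/* earlier levels first */
		ref[l]++;
}

static void put_table(int level)
{
	for (int l = level; l >= 0; l--)	/* own level first */
		ref[l]--;
}

int main(void)
{
	get_table(2);	/* pins levels 0, 1, 2 */
	get_table(0);
	printf("%d %d %d\n", ref[0], ref[1], ref[2]);	/* 2 1 1 */
	put_table(2);	/* level 0 stays pinned by the second get */
	printf("%d %d %d\n", ref[0], ref[1], ref[2]);	/* 1 0 0 */
	return 0;
}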
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
+{
+ return tc_end_fdb(esw);
+}
+
+static int
+mlx5_esw_chains_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_chains_priv *chains_priv;
+ struct mlx5_core_dev *dev = esw->dev;
+ u32 max_flow_counter, fdb_max;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return -ENOMEM;
+ esw_chains_priv(esw) = chains_priv;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_debug(dev,
+ "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+
+ mlx5_esw_chains_init_sz_pool(esw);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else {
+ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_esw_chains_get_chain_range(esw),
+ mlx5_esw_chains_get_prio_range(esw));
+ }
+
+ err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mutex_init(&esw_chains_lock(esw));
+
+ return 0;
+
+init_prios_ht_err:
+ rhashtable_destroy(&esw_chains_ht(esw));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return err;
+}
+
+static void
+mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
+{
+ mutex_destroy(&esw_chains_lock(esw));
+ rhashtable_destroy(&esw_prios_ht(esw));
+ rhashtable_destroy(&esw_chains_ht(esw));
+
+ kfree(esw_chains_priv(esw));
+}
+
+static int
+mlx5_esw_chains_open(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ /* Create tc_end_fdb(esw), the ft chain that is always created */
+ ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
+ 1, 0);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ tc_end_fdb(esw) = ft;
+
+ /* Always open the root for fast path */
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+ /* Open level 1 for split rules now if prios aren't supported */
+ if (!mlx5_esw_chains_prios_supported(esw)) {
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
+
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_1_err;
+ }
+ }
+
+ return 0;
+
+level_1_err:
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+level_0_err:
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+ return err;
+}
+
+static void
+mlx5_esw_chains_close(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+}
+
+int
+mlx5_esw_chains_create(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = mlx5_esw_chains_init(esw);
+ if (err)
+ return err;
+
+ err = mlx5_esw_chains_open(esw);
+ if (err)
+ goto err_open;
+
+ return 0;
+
+err_open:
+ mlx5_esw_chains_cleanup(esw);
+ return err;
+}
+
+void
+mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
+{
+ mlx5_esw_chains_close(esw);
+ mlx5_esw_chains_cleanup(esw);
+}
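
Taken together, the exported entry points give eswitch_offloads.c and the tc offload code a reference-counted table service. A hedged sketch of the intended call sequence from a consumer's point of view; the chain/prio values are arbitrary, error handling is compressed, and the usual driver headers (for IS_ERR()/PTR_ERR() and the eswitch types) are assumed from the in-tree context:

/* Illustrative consumer, not part of the patch. */
#include "eswitch_offloads_chains.h"

static int example_use_chains(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table *ft;
	int err;

	err = mlx5_esw_chains_create(esw);	/* init + open root tables */
	if (err)
		return err;

	/* Each get takes a reference and implicitly pins lower levels. */
	ft = mlx5_esw_chains_get_table(esw, 2 /* chain */, 1 /* prio */,
				       0 /* level */);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}

	/* ... add flow rules to ft via mlx5_add_flow_rules() ... */

	mlx5_esw_chains_put_table(esw, 2, 1, 0);
out:
	mlx5_esw_chains_destroy(esw);
	return err;
}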
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
new file mode 100644
index 000000000000..2e13097fe348
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_CHAINS_H__
+#define __MLX5_ESW_CHAINS_H__
+
+bool
+mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+
+int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
+void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
+
+#endif /* __MLX5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 366bda1bb1c3..dc08ed9339ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_flow_act *flow_act)
{
static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int prio, flags;
int err;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
- prio = FDB_SLOW_PATH;
- flags = MLX5_FLOW_TABLE_TERMINATION;
- tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
- 0, flags);
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 3c816e81f8d9..b25465d9e030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ MLX5_SET(set_fte_in, in, ignore_flow_level,
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8c5df6c7d7b6..c7a16ae05fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -579,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
+ if (ft->autogroup.active &&
+ fg->max_ftes == ft->autogroup.group_size &&
+ fg->start_index < ft->autogroup.max_fte)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1006,7 +1008,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
u16 vport)
{
struct mlx5_flow_root_namespace *root = find_root(&ns->node);
- struct mlx5_flow_table *next_ft = NULL;
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+ struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
int log_table_sz;
@@ -1023,14 +1026,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = -EINVAL;
goto unlock_root;
}
- if (ft_attr->level >= fs_prio->num_levels) {
- err = -ENOSPC;
- goto unlock_root;
+ if (!unmanaged) {
+ /* The level is related to the
+ * priority level range.
+ */
+ if (ft_attr->level >= fs_prio->num_levels) {
+ err = -ENOSPC;
+ goto unlock_root;
+ }
+
+ ft_attr->level += fs_prio->start_level;
}
+
/* The level is related to the
* priority level range.
*/
- ft_attr->level += fs_prio->start_level;
ft = alloc_flow_table(ft_attr->level,
vport,
ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -1043,19 +1053,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
- next_ft = find_next_chained_ft(fs_prio);
+ next_ft = unmanaged ? ft_attr->next_ft :
+ find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
- err = connect_flow_table(root->dev, ft, fs_prio);
- if (err)
- goto destroy_ft;
+ if (!unmanaged) {
+ err = connect_flow_table(root->dev, ft, fs_prio);
+ if (err)
+ goto destroy_ft;
+ }
+
ft->node.active = true;
down_write_ref_node(&fs_prio->node, false);
- tree_add_node(&ft->node, &fs_prio->node);
- list_add_flow_table(ft, fs_prio);
+ if (!unmanaged) {
+ tree_add_node(&ft->node, &fs_prio->node);
+ list_add_flow_table(ft, fs_prio);
+ } else {
+ ft->node.root = fs_prio->node.root;
+ }
fs_prio->num_ft++;
up_write_ref_node(&fs_prio->node, false);
mutex_unlock(&root->chain_lock);
@@ -1103,31 +1121,27 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags)
+ struct mlx5_flow_table_attr *ft_attr)
{
- struct mlx5_flow_table_attr ft_attr = {};
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;
- if (max_num_groups > num_flow_table_entries)
+ if (max_num_groups > autogroups_max_fte)
+ return ERR_PTR(-EINVAL);
+ if (num_reserved_entries > ft_attr->max_fte)
return ERR_PTR(-EINVAL);
- ft_attr.max_fte = num_flow_table_entries;
- ft_attr.prio = prio;
- ft_attr.level = level;
- ft_attr.flags = flags;
-
- ft = mlx5_create_flow_table(ns, &ft_attr);
+ ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ ft->autogroup.max_fte = autogroups_max_fte;
/* We reserve space for flow groups in addition to max types */
- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
return ft;
}
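
With the reserved entries carved out first, the group math changes subtly. A worked example with the values this series passes for chain tables (a 4K-entry table from the smallest pool, ESW_OFFLOADS_NUM_GROUPS of 4, and 2 entries reserved for the miss group at the table's end):

#include <stdio.h>

int main(void)
{
	int max_fte = 4 * 1024;		/* smallest ESW_POOLS size */
	int num_reserved_entries = 2;	/* miss group, last two FTEs */
	int max_num_groups = 4;		/* ESW_OFFLOADS_NUM_GROUPS */

	int autogroups_max_fte = max_fte - num_reserved_entries;
	/* Divide by max_num_groups + 1 to keep spare room for groups,
	 * matching mlx5_create_auto_grouped_flow_table().
	 */
	int group_size = autogroups_max_fte / (max_num_groups + 1);

	printf("auto-group FTEs: 0..%d\n", autogroups_max_fte - 1);
	printf("reserved FTEs:   %d..%d\n", autogroups_max_fte, max_fte - 1);
	printf("group size:      %d\n", group_size);	/* 818 */
	return 0;
}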
@@ -1149,7 +1163,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg;
int err;
- if (ft->autogroup.active)
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
return ERR_PTR(-EPERM);
down_write_ref_node(&ft->node, false);
@@ -1322,9 +1336,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
const struct mlx5_flow_spec *spec)
{
struct list_head *prev = &ft->node.children;
- struct mlx5_flow_group *fg;
+ u32 max_fte = ft->autogroup.max_fte;
unsigned int candidate_index = 0;
unsigned int group_size = 0;
+ struct mlx5_flow_group *fg;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
@@ -1332,7 +1347,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
group_size = ft->autogroup.group_size;
- /* ft->max_fte == ft->autogroup.max_types */
+ /* max_fte == ft->autogroup.max_types */
if (group_size == 0)
group_size = 1;
@@ -1345,7 +1360,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte)
+ if (candidate_index + group_size > max_fte)
return ERR_PTR(-ENOSPC);
fg = alloc_insert_flow_group(ft,
@@ -1529,18 +1544,30 @@ static bool counter_is_valid(u32 action)
}
static bool dest_is_valid(struct mlx5_flow_destination *dest,
- u32 action,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_table *ft)
{
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+ u32 action = flow_act->action;
+
if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
return counter_is_valid(action);
if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return true;
+ if (ignore_level) {
+ if (ft->type != FS_FT_FDB)
+ return false;
+
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ dest->ft->type != FS_FT_FDB)
+ return false;
+ }
+
if (!dest || ((dest->type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
- (dest->ft->level <= ft->level)))
+ (dest->ft->level <= ft->level && !ignore_level)))
return false;
return true;
}
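
The reworked dest_is_valid() boils down to: forwarding to a shallower or equal table level is normally rejected, and FLOW_ACT_IGNORE_FLOW_LEVEL waives that rule, but only for FDB-to-FDB forwarding. A condensed restatement of the table-destination predicate (illustrative; the real function also handles counters and non-table destinations):

#include <stdbool.h>
#include <stdio.h>

enum ft_type { FT_FDB, FT_NIC_RX };

static bool fwd_to_table_ok(bool ignore_level,
			    enum ft_type src_type, int src_level,
			    enum ft_type dst_type, int dst_level)
{
	if (ignore_level)	/* loop avoidance becomes the caller's job */
		return src_type == FT_FDB && dst_type == FT_FDB;
	return dst_level > src_level;	/* strictly deeper only */
}

int main(void)
{
	/* Backwards jump in the FDB: only valid with ignore_flow_level. */
	printf("%d\n", fwd_to_table_ok(false, FT_FDB, 2, FT_FDB, 1)); /* 0 */
	printf("%d\n", fwd_to_table_ok(true, FT_FDB, 2, FT_FDB, 1));  /* 1 */
	/* Never valid outside the FDB, even with the flag set. */
	printf("%d\n", fwd_to_table_ok(true, FT_NIC_RX, 2, FT_NIC_RX, 1));
	return 0;
}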
@@ -1770,7 +1797,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
return ERR_PTR(-EINVAL);
for (i = 0; i < dest_num; i++) {
- if (!dest_is_valid(&dest[i], flow_act->action, ft))
+ if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
}
nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
@@ -2033,7 +2060,8 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
int err = 0;
mutex_lock(&root->chain_lock);
- err = disconnect_flow_table(ft);
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+ err = disconnect_flow_table(ft);
if (err) {
mutex_unlock(&root->chain_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c2621b911563..be5f5e32c1e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -164,6 +164,7 @@ struct mlx5_flow_table {
unsigned int required_groups;
unsigned int group_size;
unsigned int num_groups;
+ unsigned int max_fte;
} autogroup;
/* Protect fwd_rules */
struct mutex lock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index a19790dee7b2..d89ff1d09119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
MLX5_PCAM_REGS_5000_TO_507F);
}
-static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
+ enum mlx5_mcam_reg_groups group)
{
- return mlx5_query_mcam_reg(dev, dev->caps.mcam,
- MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
- MLX5_MCAM_REGS_FIRST_128);
+ return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
- if (MLX5_CAP_GEN(dev, mcam_reg))
- mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, mcam_reg)) {
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ }
if (MLX5_CAP_GEN(dev, qcam_reg))
mlx5_get_qcam_reg(dev);
@@ -245,6 +248,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 3ed8ab2d703d..56078b23f1a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -419,6 +419,28 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
}
+/* The stats groups order is the reverse of the update_stats() call order */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5i_stats_grps);
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -435,6 +457,8 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5i_stats_grps,
+ .stats_grps_num = mlx5i_stats_grps_num,
};
/* mlx5i netdev NDOs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index fc0d9583475d..b91eabc09fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -586,7 +586,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier(&ldev->nb)) {
+ if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -599,7 +600,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev;
int i;
@@ -619,7 +620,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
- unregister_netdevice_notifier(&ldev->nb);
+ unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..316ab09e2664 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -44,6 +44,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index b70afa310ad2..416676c35b1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -200,8 +200,6 @@ static void mlx5_lag_fib_update(struct work_struct *work)
rtnl_lock();
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
fib_work->fen_info.fi);
@@ -259,8 +257,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
switch (event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf7b8da0f010..f554cfddcf4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da67b28d6e23..fcce9e0fc82c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 004c56c2fc0c..6dec2a550a10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -677,9 +677,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
goto out_invalid_arg;
}
if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ mlx5_core_warn_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
- "Destination table level should be higher than source table\n");
- goto out_invalid_arg;
+ "Connecting table at level %d to a destination table at level %d\n",
+ matcher->tbl->level,
+ action->dest_tbl.tbl->level);
}
attr.final_icm_addr = rx_rule ?
action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
@@ -690,9 +693,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* get the relevant addresses */
if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
- ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
- action->dest_tbl.fw_tbl.ft->type,
- action->dest_tbl.fw_tbl.ft->id,
+ ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+ action->dest_tbl.fw_tbl.type,
+ action->dest_tbl.fw_tbl.id,
&output);
if (!ret) {
action->dest_tbl.fw_tbl.tx_icm_addr =
@@ -982,8 +985,106 @@ dec_ref:
}
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev)
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests)
+{
+ struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+ struct mlx5dr_action **ref_actions;
+ struct mlx5dr_action *action;
+ bool reformat_req = false;
+ u32 num_of_ref = 0;
+ int ret;
+ int i;
+
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
+ return NULL;
+ }
+
+ hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ if (!hw_dests)
+ return NULL;
+
+ ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (!ref_actions)
+ goto free_hw_dests;
+
+ for (i = 0; i < num_of_dests; i++) {
+ struct mlx5dr_action *reformat_action = dests[i].reformat;
+ struct mlx5dr_action *dest_action = dests[i].dest;
+
+ ref_actions[num_of_ref++] = dest_action;
+
+ switch (dest_action->action_type) {
+ case DR_ACTION_TYP_VPORT:
+ hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ hw_dests[i].vport.num = dest_action->vport.caps->num;
+ hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+ if (reformat_action) {
+ reformat_req = true;
+ hw_dests[i].vport.reformat_id =
+ reformat_action->reformat.reformat_id;
+ ref_actions[num_of_ref++] = reformat_action;
+ hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ }
+ break;
+
+ case DR_ACTION_TYP_FT:
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (dest_action->dest_tbl.is_fw_tbl)
+ hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+ else
+ hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+ break;
+
+ default:
+ mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
+ goto free_ref_actions;
+ }
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ goto free_ref_actions;
+
+ ret = mlx5dr_fw_create_md_tbl(dmn,
+ hw_dests,
+ num_of_dests,
+ reformat_req,
+ &action->dest_tbl.fw_tbl.id,
+ &action->dest_tbl.fw_tbl.group_id);
+ if (ret)
+ goto free_action;
+
+ refcount_inc(&dmn->refcount);
+
+ for (i = 0; i < num_of_ref; i++)
+ refcount_inc(&ref_actions[i]->refcount);
+
+ action->dest_tbl.is_fw_tbl = true;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl.fw_tbl.ref_actions = ref_actions;
+ action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+
+ kfree(hw_dests);
+
+ return action;
+
+free_action:
+ kfree(action);
+free_ref_actions:
+ kfree(ref_actions);
+free_hw_dests:
+ kfree(hw_dests);
+ return NULL;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
+ struct mlx5_flow_table *ft)
{
struct mlx5dr_action *action;
@@ -992,8 +1093,11 @@ mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
return NULL;
action->dest_tbl.is_fw_tbl = 1;
- action->dest_tbl.fw_tbl.ft = ft;
- action->dest_tbl.fw_tbl.mdev = mdev;
+ action->dest_tbl.fw_tbl.type = ft->type;
+ action->dest_tbl.fw_tbl.id = ft->id;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
return action;
}
@@ -1213,58 +1317,85 @@ not_found:
}
static int
-dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
- __be64 *sw_action,
- __be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
- u8 offset, length, max_length, action;
+ u8 max_length;
u16 sw_field;
- u8 hw_opcode;
u32 data;
/* Get SW modify action data */
- action = MLX5_GET(set_action_in, sw_action, action_type);
- length = MLX5_GET(set_action_in, sw_action, length);
- offset = MLX5_GET(set_action_in, sw_action, offset);
sw_field = MLX5_GET(set_action_in, sw_action, field);
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
if (!hw_action_info) {
- mlx5dr_dbg(dmn, "Modify action invalid field given\n");
+ mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
}
max_length = hw_action_info->end - hw_action_info->start + 1;
- switch (action) {
- case MLX5_ACTION_TYPE_SET:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
- /* PRM defines that length zero specific length of 32bits */
- if (!length)
- length = 32;
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
- if (length + offset > max_length) {
- mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
- return -EINVAL;
- }
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ hw_action_info->hw_field);
- case MLX5_ACTION_TYPE_ADD:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
- offset = 0;
- length = max_length;
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+ hw_action_info->start);
- default:
- mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
- return -EOPNOTSUPP;
+ /* The PRM defines that a length of zero encodes a length of 32 bits */
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+ max_length == 32 ? 0 : max_length);
+
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+ *ret_hw_info = hw_action_info;
+
+ return 0;
+}
+
+static int
+dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u8 offset, length, max_length;
+ u16 sw_field;
+ u32 data;
+
+ /* Get SW modify action data */
+ length = MLX5_GET(set_action_in, sw_action, length);
+ offset = MLX5_GET(set_action_in, sw_action, offset);
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ data = MLX5_GET(set_action_in, sw_action, data);
+
+ /* Convert SW data to HW modify action format */
+ hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ if (!hw_action_info) {
+ mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* The PRM defines that a length of zero encodes a length of 32 bits */
+ length = length ? length : 32;
+
+ max_length = hw_action_info->end - hw_action_info->start + 1;
+
+ if (length + offset > max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
@@ -1283,48 +1414,236 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
}
static int
-dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
- const __be64 *sw_action)
+dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
+{
+ u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
+ u16 src_field, dst_field;
+
+ /* Get SW modify action data */
+ src_field = MLX5_GET(copy_action_in, sw_action, src_field);
+ dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
+ src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
+ dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
+ length = MLX5_GET(copy_action_in, sw_action, length);
+
+ /* Convert SW data to HW modify action format */
+ hw_src_action_info = dr_action_modify_get_hw_info(src_field);
+ hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ if (!hw_src_action_info || !hw_dst_action_info) {
+ mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* The PRM defines that a length of zero encodes a length of 32 bits */
+ length = length ? length : 32;
+
+ src_max_length = hw_src_action_info->end -
+ hw_src_action_info->start + 1;
+ dst_max_length = hw_dst_action_info->end -
+ hw_dst_action_info->start + 1;
+
+ if (length + src_offset > src_max_length ||
+ length + dst_offset > dst_max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
+ }
+
+ MLX5_SET(dr_action_hw_copy, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
+ hw_dst_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
+ hw_dst_action_info->start + dst_offset);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
+ length == 32 ? 0 : length);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
+ hw_src_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
+ hw_src_action_info->start + src_offset);
+
+ *ret_dst_hw_info = hw_dst_action_info;
+ *ret_src_hw_info = hw_src_action_info;
+
+ return 0;
+}
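
The copy conversion applies the same PRM convention as set (a length of zero encodes 32 bits) and then checks both the source and destination windows against their field widths. A compact sketch of just that validation, with field widths passed in directly instead of looked up in the conversion table:

#include <stdio.h>

/* PRM convention: a length of zero encodes 32 bits. */
static unsigned int decode_len(unsigned int length)
{
	return length ? length : 32;
}

/* Both the source and destination windows must fit in their fields. */
static int copy_bounds_ok(unsigned int length,
			  unsigned int src_offset, unsigned int src_bits,
			  unsigned int dst_offset, unsigned int dst_bits)
{
	unsigned int len = decode_len(length);

	return len + src_offset <= src_bits &&
	       len + dst_offset <= dst_bits;
}

int main(void)
{
	/* 16-bit copy from bit 0 of a 16-bit field into bit 8 of a
	 * 32-bit field: fits.
	 */
	printf("%d\n", copy_bounds_ok(16, 0, 16, 8, 32));	/* 1 */
	/* Length 0 means 32 bits, which overflows a 16-bit source. */
	printf("%d\n", copy_bounds_ok(0, 0, 16, 0, 32));	/* 0 */
	return 0;
}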
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
{
- u16 sw_field;
u8 action;
+ int ret;
- sw_field = MLX5_GET(set_action_in, sw_action, field);
+ *hw_action = 0;
+ *ret_src_hw_info = NULL;
+
+ /* Get SW modify action type */
action = MLX5_GET(set_action_in, sw_action, action_type);
- /* Check if SW field is supported in current domain (RX/TX) */
- if (action == MLX5_ACTION_TYPE_SET) {
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ switch (action) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info,
+ ret_src_hw_info);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int
+dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
+ sw_field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u16 sw_fields[2];
+ int i;
+
+ sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
+ sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
+
+ for (i = 0; i < 2; i++) {
+ if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
- }
-
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
}
- } else if (action == MLX5_ACTION_TYPE_ADD) {
- if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
- mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
- return -EINVAL;
- }
- } else {
- mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
- return -EOPNOTSUPP;
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
+ return -EINVAL;
}
return 0;
}
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u8 action_type;
+ int ret;
+
+ action_type = MLX5_GET(set_action_in, sw_action, action_type);
+
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_check_set_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_check_add_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_check_copy_field_limitation(action,
+ sw_action);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type %d for modify action\n",
+ action_type);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static bool
dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
{
@@ -1333,7 +1652,7 @@ dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
__be64 sw_actions[],
@@ -1341,20 +1660,26 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
*modify_ttl = false;
+ action->rewrite.allow_rx = 1;
+ action->rewrite.allow_tx = 1;
+
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
- ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+ ret = dr_action_modify_check_field_limitation(action,
+ sw_action);
if (ret)
return ret;
@@ -1365,32 +1690,35 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
ret = dr_action_modify_sw_to_hw(dmn,
sw_action,
&hw_action,
- &hw_action_info);
+ &hw_dst_action_info,
+ &hw_src_action_info);
if (ret)
return ret;
/* Due to a HW limitation we cannot modify two different L3 types */
- if (l3_type && hw_action_info->l3_type &&
- hw_action_info->l3_type != l3_type) {
+ if (l3_type && hw_dst_action_info->l3_type &&
+ hw_dst_action_info->l3_type != l3_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
return -EINVAL;
}
- if (hw_action_info->l3_type)
- l3_type = hw_action_info->l3_type;
+ if (hw_dst_action_info->l3_type)
+ l3_type = hw_dst_action_info->l3_type;
/* Due to a HW limitation we cannot modify two different L4 types */
- if (l4_type && hw_action_info->l4_type &&
- hw_action_info->l4_type != l4_type) {
+ if (l4_type && hw_dst_action_info->l4_type &&
+ hw_dst_action_info->l4_type != l4_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
return -EINVAL;
}
- if (hw_action_info->l4_type)
- l4_type = hw_action_info->l4_type;
+ if (hw_dst_action_info->l4_type)
+ l4_type = hw_dst_action_info->l4_type;
/* HW reads and executes two actions at once; this means we
* need to create a gap if two actions access the same field
*/
- if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+ if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
+ (hw_src_action_info &&
+ hw_field == hw_src_action_info->hw_field))) {
/* Check if after gap insertion the total number of HW
* modify actions doesn't exceed the limit
*/
@@ -1400,7 +1728,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
return -EINVAL;
}
}
- hw_field = hw_action_info->hw_field;
+ hw_field = hw_dst_action_info->hw_field;
hw_actions[hw_idx] = hw_action;
hw_idx++;
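
The pairing constraint above is easiest to see in isolation. A minimal, self-contained sketch of the gap rule in plain C; all names here are hypothetical:

	#define DR_HW_NOP 0ULL	/* made-up encoding of a do-nothing action */

	/* HW consumes actions in pairs; an odd slot must not touch the same
	 * field as the even slot before it, so a no-op is burned to push the
	 * conflicting action into the next pair.
	 */
	static int dr_place_action(unsigned long long *hw, int idx, int max,
				   unsigned short prev_field,
				   unsigned short field,
				   unsigned long long act)
	{
		if ((idx % 2) && prev_field == field) {
			if (idx + 1 >= max)
				return -1;	/* gap would overflow the list */
			hw[idx++] = DR_HW_NOP;	/* burn the odd slot */
		}
		hw[idx] = act;
		return idx + 1;	/* next free slot */
	}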
@@ -1443,7 +1771,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
goto free_chunk;
}
- ret = dr_actions_convert_modify_header(dmn,
+ ret = dr_actions_convert_modify_header(action,
max_hw_actions,
num_sw_actions,
actions,
@@ -1559,8 +1887,26 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
- if (!action->dest_tbl.is_fw_tbl)
+ if (action->dest_tbl.is_fw_tbl)
+ refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+ else
refcount_dec(&action->dest_tbl.tbl->refcount);
+
+ if (action->dest_tbl.is_fw_tbl &&
+ action->dest_tbl.fw_tbl.num_of_ref_actions) {
+ struct mlx5dr_action **ref_actions;
+ int i;
+
+ ref_actions = action->dest_tbl.fw_tbl.ref_actions;
+ for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+ refcount_dec(&ref_actions[i]->refcount);
+
+ kfree(ref_actions);
+
+ mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
+ action->dest_tbl.fw_tbl.id,
+ action->dest_tbl.fw_tbl.group_id);
+ }
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
refcount_dec(&action->reformat.dmn->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 41662c4e2664..461b39376daf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -320,12 +320,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id)
{
@@ -335,37 +330,43 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
int err;
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, table_type);
+ MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
- MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
- MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
- MLX5_SET(flow_table_context, ft_mdev, level, level);
+ MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
+ MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
+ MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
- if (sw_owner) {
+ if (attr->sw_owner) {
/* icm_addr_0 used for FDB RX / NIC_TX / NIC_RX
* icm_addr_1 used for FDB TX
*/
- if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+ if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+ sw_owner_icm_root_0, attr->icm_addr_rx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_tx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+ sw_owner_icm_root_0, attr->icm_addr_tx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
+ sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, icm_addr_tx);
+ sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ attr->decap_en);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ attr->reformat_en);
+
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
- if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+ if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
+ fdb_rx_icm_addr)
*fdb_rx_icm_addr =
(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
@@ -478,3 +479,208 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
return 0;
}
+
+static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
+ struct mlx5dr_cmd_fte_info *fte,
+ bool *extended_dest)
+{
+ int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
+ int num_encap = 0;
+ int i;
+
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
+
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
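
Worked numbers for the capability check above; the capability value is illustrative:

	int log_cap = 2;		/* log_max_fdb_encap_uplink, illustrative */
	int max_encap = 1 << log_cap;	/* FW allows up to 4 encap destinations */

	/* A rule carrying num_encap == 5 exceeds max_encap and is rejected
	 * with -EOPNOTSUPP before any FW command is built.
	 */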
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ void *in_flow_context, *vlan;
+ bool extended_dest = false;
+ void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
+ void *in_dests;
+ u32 *in;
+ int err;
+ int i;
+
+ if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
+
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, op_mod, opmod);
+ MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
+ MLX5_SET(flow_context, in_flow_context, flow_tag,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
+ }
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+
+ in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+ match_value);
+ memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ unsigned int id, type = fte->dest_arr[i].type;
+
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ id = fte->dest_arr[i].ft_num;
+ type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ id = fte->dest_arr[i].ft_id;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ id = fte->dest_arr[i].vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ fte->dest_arr[i].vport.vhca_id);
+ if (extended_dest && (fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ fte->dest_arr[i].vport.reformat_id);
+ }
+ break;
+ default:
+ id = fte->dest_arr[i].tir_num;
+ }
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ type);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ fte->dest_arr[i].counter_id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
+ kvfree(in);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 60ef6e6171e3..1fbcd012bb85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -7,6 +7,7 @@
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
u32 table_id, group_id, modify_hdr_id;
u64 rx_icm_addr, modify_ttl_action;
@@ -16,9 +17,14 @@ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
if (!recalc_cs_ft)
return NULL;
- ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
- 0, 0, dmn->info.caps.max_ft_level - 1,
- false, true, &rx_icm_addr, &table_id);
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 1;
+ ft_attr.term_tbl = true;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
+ &ft_attr,
+ &rx_icm_addr,
+ &table_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
goto free_ttl_tbl;
@@ -91,3 +97,70 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
kfree(recalc_cs_ft);
}
+
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id)
+{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+ struct mlx5dr_cmd_fte_info fte_info = {};
+ u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
+ struct mlx5dr_cmd_ft_info ft_info = {};
+ int ret;
+
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.reformat_en = reformat_req;
+ ft_attr.decap_en = reformat_req;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
+ return ret;
+ }
+
+ ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, group_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
+ goto free_flow_table;
+ }
+
+ ft_info.id = *tbl_id;
+ ft_info.type = FS_FT_FDB;
+ fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_info.dests_size = num_dest;
+ fte_info.val = val;
+ fte_info.dest_arr = dest;
+
+ ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
+ goto free_flow_group;
+ }
+
+ return 0;
+
+free_flow_group:
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, *group_id);
+free_flow_table:
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+ return ret;
+}
+
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
+ u32 tbl_id, u32 group_id)
+{
+ mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ tbl_id, group_id);
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+}
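
A hypothetical caller of the pair above (dmn, dests and num_dest are assumed to be in scope); tbl_id and group_id are outputs of the create step and must be passed back verbatim on destroy:

	u32 tbl_id, group_id;
	int err;

	err = mlx5dr_fw_create_md_tbl(dmn, dests, num_dest, false,
				      &tbl_id, &group_id);
	if (err)
		return err;

	/* ... reference the table from a steering action ... */

	mlx5dr_fw_destroy_md_tbl(dmn, tbl_id, group_id);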
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 51803eef13dd..c7f10d4f8f8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
+#include <linux/smp.h>
#include "dr_types.h"
#define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e178d8d3dbc9..14ce2d7dbb66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -211,6 +211,9 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
{
+ bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
u64 icm_addr_rx = 0;
u64 icm_addr_tx = 0;
int ret;
@@ -221,18 +224,21 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
if (tbl->tx.s_anchor)
icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
- ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
- tbl->table_type,
- icm_addr_rx,
- icm_addr_tx,
- tbl->dmn->info.caps.max_ft_level - 1,
- true, false, NULL,
- &tbl->table_id);
+ ft_attr.table_type = tbl->table_type;
+ ft_attr.icm_addr_rx = icm_addr_rx;
+ ft_attr.icm_addr_tx = icm_addr_tx;
+ ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
+ ft_attr.sw_owner = true;
+ ft_attr.decap_en = en_decap;
+ ft_attr.reformat_en = en_encap;
+
+ ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
+ NULL, &tbl->table_id);
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
{
struct mlx5dr_table *tbl;
int ret;
@@ -245,6 +251,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
tbl->dmn = dmn;
tbl->level = level;
+ tbl->flags = flags;
refcount_set(&tbl->refcount, 1);
ret = dr_table_init(tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3fdf4a5eb031..dffe35145d19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -681,6 +681,7 @@ struct mlx5dr_table {
u32 level;
u32 table_type;
u32 table_id;
+ u32 flags;
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
@@ -744,10 +745,14 @@ struct mlx5dr_action {
union {
struct mlx5dr_table *tbl;
struct {
- struct mlx5_flow_table *ft;
+ struct mlx5dr_domain *dmn;
+ u32 id;
+ u32 group_id;
+ enum fs_flow_table_type type;
u64 rx_icm_addr;
u64 tx_icm_addr;
- struct mlx5_core_dev *mdev;
+ struct mlx5dr_action **ref_actions;
+ u32 num_of_ref_actions;
} fw_tbl;
};
} dest_tbl;
@@ -869,6 +874,17 @@ struct mlx5dr_cmd_query_flow_table_details {
u64 sw_owner_icm_root_0;
};
+struct mlx5dr_cmd_create_flow_table_attr {
+ u32 table_type;
+ u64 icm_addr_rx;
+ u64 icm_addr_tx;
+ u8 level;
+ bool sw_owner;
+ bool term_tbl;
+ bool decap_en;
+ bool reformat_en;
+};
+
/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps);
@@ -906,12 +922,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
@@ -1053,6 +1064,43 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+struct mlx5dr_cmd_ft_info {
+ u32 id;
+ u16 vport;
+ enum fs_flow_table_type type;
+};
+
+struct mlx5dr_cmd_flow_destination_hw_info {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ u32 ft_num;
+ u32 ft_id;
+ u32 counter_id;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ u32 reformat_id;
+ u8 flags;
+ } vport;
+ };
+};
+
+struct mlx5dr_cmd_fte_info {
+ u32 dests_size;
+ u32 index;
+ struct mlx5_flow_context flow_context;
+ u32 *val;
+ struct mlx5_flow_act action;
+ struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
+};
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte);
+
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
@@ -1067,4 +1115,12 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
u32 vport_num,
u64 *rx_icm_addr);
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id);
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+ u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3d587d0bdbbe..3abfc8125926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -74,7 +74,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
next_ft);
tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
- ft->level);
+ ft->level, ft->flags);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -184,13 +184,13 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
-static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
+static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
- return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
+ return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
@@ -206,6 +206,12 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}
+static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
+{
+ return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+}
+
#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
@@ -213,7 +219,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte *fte)
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
- struct mlx5dr_action *term_action = NULL;
+ struct mlx5dr_action_dest *term_actions;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -223,6 +229,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_rule *rule;
struct mlx5_flow_rule *dst;
int fs_dr_num_actions = 0;
+ int num_term_actions = 0;
int num_actions = 0;
size_t match_sz;
int err = 0;
@@ -233,18 +240,38 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
GFP_KERNEL);
- if (!actions)
- return -ENOMEM;
+ if (!actions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*fs_dr_actions), GFP_KERNEL);
if (!fs_dr_actions) {
- kfree(actions);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*term_actions), GFP_KERNEL);
+ if (!term_actions) {
+ err = -ENOMEM;
+ goto free_fs_dr_actions_alloc;
}
match_sz = sizeof(fte->val);
+ /* Drop reformat action bit if destination vport set with reformat */
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (!contain_vport_reformat_action(dst))
+ continue;
+
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ break;
+ }
+ }
+
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
* TX: push vlan -> modify header -> encap
@@ -335,7 +362,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
}
if (fte->flow_context.flow_tag) {
@@ -352,34 +379,25 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
- u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
- switch (type) {
- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
- id = dst->dest_attr.counter_id;
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
- tmp_action =
- mlx5dr_action_create_flow_counter(id);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- break;
+ switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
- tmp_action = create_ft_action(dev, dst);
+ tmp_action = create_ft_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
@@ -388,7 +406,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions].dest = tmp_action;
+
+ if (dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ term_actions[num_term_actions].reformat =
+ dst->dest_attr.vport.pkt_reformat->action.dr_action;
+
+ num_term_actions++;
break;
default:
err = -EOPNOTSUPP;
@@ -397,11 +422,50 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ u32 id;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ id = dst->dest_attr.counter_id;
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
-
- if (term_action)
- actions[num_actions++] = term_action;
+ if (num_term_actions == 1) {
+ if (term_actions->reformat)
+ actions[num_actions++] = term_actions->reformat;
+
+ actions[num_actions++] = term_actions->dest;
+ } else if (num_term_actions > 1) {
+ tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+ term_actions,
+ num_term_actions);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
@@ -412,7 +476,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
+ kfree(term_actions);
kfree(actions);
+
fte->fs_dr_rule.dr_rule = rule;
fte->fs_dr_rule.num_actions = fs_dr_num_actions;
fte->fs_dr_rule.dr_actions = fs_dr_actions;
@@ -420,13 +486,18 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- for (i = 0; i < fs_dr_num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = fs_dr_num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
mlx5dr_action_destroy(fs_dr_actions[i]);
- mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
- kfree(actions);
+ kfree(term_actions);
+free_fs_dr_actions_alloc:
kfree(fs_dr_actions);
+free_actions_alloc:
+ kfree(actions);
+out_err:
+ mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
return err;
}
@@ -533,7 +604,8 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
if (err)
return err;
- for (i = 0; i < rule->num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = rule->num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
mlx5dr_action_destroy(rule->dr_actions[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 1722f4668269..e01c3766c7de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -32,6 +32,7 @@ enum {
};
enum {
+ MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};
@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits {
u8 inline_data[0x20];
};
+struct mlx5_ifc_dr_action_hw_copy_bits {
+ u8 opcode[0x8];
+ u8 destination_field_code[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 reserved_at_20[0x8];
+ u8 source_field_code[0x8];
+ u8 reserved_at_30[0x2];
+ u8 source_left_shifter[0x6];
+ u8 reserved_at_38[0x8];
+};
+
#endif /* MLX5_IFC_DR_H */
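
A sketch of packing one copy action against the layout above; the field codes and length are made-up placeholders, and only the MLX5_SET accessor and the opcode constant come from the driver:

	__be64 hw_action = 0;

	MLX5_SET(dr_action_hw_copy, &hw_action, opcode,
		 MLX5DR_ACTION_MDFY_HW_OP_COPY);
	MLX5_SET(dr_action_hw_copy, &hw_action, destination_field_code, 0x44);
	MLX5_SET(dr_action_hw_copy, &hw_action, destination_length, 16);
	MLX5_SET(dr_action_hw_copy, &hw_action, source_field_code, 0x45);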
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index adda9cbfba45..e1edc9c247b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -33,6 +33,11 @@ struct mlx5dr_match_parameters {
u64 *match_buf; /* Device spec format */
};
+struct mlx5dr_action_dest {
+ struct mlx5dr_action *dest;
+ struct mlx5dr_action *reformat;
+};
+
#ifdef CONFIG_MLX5_SW_STEERING
struct mlx5dr_domain *
@@ -46,7 +51,7 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
int mlx5dr_table_destroy(struct mlx5dr_table *table);
@@ -75,14 +80,19 @@ struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev);
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft);
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id);
+struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests);
+
struct mlx5dr_action *mlx5dr_action_create_drop(void);
struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
@@ -131,7 +141,7 @@ mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) { }
static inline struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
static inline int
mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
@@ -165,8 +175,8 @@ static inline struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
static inline struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev) { return NULL; }
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
@@ -174,6 +184,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u16 vhca_id) { return NULL; }
static inline struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests) { return NULL; }
+
+static inline struct mlx5dr_action *
mlx5dr_action_create_drop(void) { return NULL; }
static inline struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index f2a0e72285ba..02f7e4a39578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
len = nstrides << wq->fbc.log_stride;
wqe = mlx5_wq_cyc_get_wqe(wq, ix);
- pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 2b543911ae00..c4caeeadcba9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -213,8 +213,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
err_register_netdev:
mlxsw_m->ports[local_port] = NULL;
- free_netdev(dev);
err_dev_addr_get:
+ free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_m->core, local_port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index af30e8a76682..dd6685156396 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3477,10 +3477,10 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
enum mlxsw_reg_qeec_hr {
- MLXSW_REG_QEEC_HIERARCY_PORT,
- MLXSW_REG_QEEC_HIERARCY_GROUP,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_PORT,
+ MLXSW_REG_QEEC_HR_GROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ MLXSW_REG_QEEC_HR_TC,
};
/* reg_qeec_element_hierarchy
@@ -3563,8 +3563,8 @@ MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
*/
MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
-/* A large max rate will disable the max shaper. */
-#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+/* The largest possible max shaper value, used to disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS ((1u << 31) - 1) /* Kbps */
/* reg_qeec_max_shaper_rate
* Max shaper information rate.
@@ -3602,6 +3602,21 @@ MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
*/
MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+/* reg_qeec_max_shaper_bs
+ * Max shaper burst size
+ * Burst size is 2^max_shaper_bs * 512 bits
+ * For Spectrum-1: Range is: 5..25
+ * For Spectrum-2: Range is: 11..25
+ * Reserved when ptps = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+
+#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+
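
For reference, the burst-size encoding above works out as follows; a minimal helper, not part of the register API:

	/* 2^bs * 512 bits expressed in bytes: bs = 5 -> 2048 bytes,
	 * bs = 11 -> 128 KiB, bs = 25 -> 2 GiB.
	 */
	static inline u64 mlxsw_qeec_bs_to_bytes(u8 bs)
	{
		return (512ULL << bs) / 8;
	}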
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
@@ -3618,8 +3633,7 @@ static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
{
MLXSW_REG_ZERO(qeec, payload);
mlxsw_reg_qeec_local_port_set(payload, local_port);
- mlxsw_reg_qeec_element_hierarchy_set(payload,
- MLXSW_REG_QEEC_HIERARCY_PORT);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, MLXSW_REG_QEEC_HR_PORT);
mlxsw_reg_qeec_ptps_set(payload, ptps);
}
@@ -3749,6 +3763,38 @@ mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
}
+/* QPDP - QoS Port DSCP to Priority Mapping Register
+ * -------------------------------------------------
+ * This register controls the port default Switch Priority and Color. The
+ * default Switch Priority and Color are used for frames where the trust state
+ * uses default values. All member ports of a LAG should be configured with the
+ * same default values.
+ */
+#define MLXSW_REG_QPDP_ID 0x4007
+#define MLXSW_REG_QPDP_LEN 0x8
+
+MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
+
+/* reg_qpdp_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+
+/* reg_qpdp_switch_prio
+ * Default port Switch Priority (default 0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+ u8 switch_prio)
+{
+ MLXSW_REG_ZERO(qpdp, payload);
+ mlxsw_reg_qpdp_local_port_set(payload, local_port);
+ mlxsw_reg_qpdp_switch_prio_set(payload, switch_prio);
+}
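
A usage sketch for the new register; local_port and prio are assumed caller-supplied, and mlxsw_reg_write() is the stock register-write helper:

	char qpdp_pl[MLXSW_REG_QPDP_LEN];

	mlxsw_reg_qpdp_pack(qpdp_pl, local_port, prio);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);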
+
/* QPDPM - QoS Port DSCP to Priority Mapping Register
* --------------------------------------------------
* This register controls the mapping from DSCP field to
@@ -5482,6 +5528,7 @@ enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
};
/* reg_htgt_trap_group
@@ -10109,6 +10156,92 @@ static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
}
+/* TIEEM - Tunneling IPinIP Encapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIEEM register maps ECN of the IP header at the ingress to the
+ * encapsulation to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TIEEM_ID 0xA812
+#define MLXSW_REG_TIEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tieem, MLXSW_REG_TIEEM_ID, MLXSW_REG_TIEEM_LEN);
+
+/* reg_tieem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tieem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tieem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tieem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tieem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tieem, payload);
+ mlxsw_reg_tieem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tieem_underlay_ecn_set(payload, underlay_ecn);
+}
+
+/* TIDEM - Tunneling IPinIP Decapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIDEM register configures the actions that are done in the
+ * decapsulation.
+ */
+#define MLXSW_REG_TIDEM_ID 0xA813
+#define MLXSW_REG_TIDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tidem, MLXSW_REG_TIDEM_ID, MLXSW_REG_TIDEM_LEN);
+
+/* reg_tidem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tidem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tidem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tidem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_en, 0x08, 28, 4);
+
+/* reg_tidem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 eip_ecn,
+ bool trap_en, u16 trap_id)
+{
+ MLXSW_REG_ZERO(tidem, payload);
+ mlxsw_reg_tidem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tidem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tidem_eip_ecn_set(payload, eip_ecn);
+ mlxsw_reg_tidem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tidem_trap_id_set(payload, trap_id);
+}
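
A combined usage sketch, assuming a struct mlxsw_sp *mlxsw_sp in scope and illustrative ECN code points:

	char tieem_pl[MLXSW_REG_TIEEM_LEN];
	char tidem_pl[MLXSW_REG_TIDEM_LEN];
	int err;

	/* Encap: overlay ECT(1) maps to underlay ECT(1). */
	mlxsw_reg_tieem_pack(tieem_pl, 0x1, 0x1);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tieem), tieem_pl);
	if (err)
		return err;

	/* Decap: preserve the overlay marking, no trap. */
	mlxsw_reg_tidem_pack(tidem_pl, 0x1, 0x1, 0x1, false, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);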
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -10581,6 +10714,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(qeec),
MLXSW_REG(qrwe),
MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdp),
MLXSW_REG(qpdpm),
MLXSW_REG(qtctm),
MLXSW_REG(qpsc),
@@ -10652,6 +10786,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(tndem),
MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
+ MLXSW_REG(tieem),
+ MLXSW_REG(tidem),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f7fd5e8fbf96..7358b5bc7eb6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,11 +45,9 @@
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
-
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 2308
+#define MLXSW_SP1_FWREV_SUBMINOR 2714
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -66,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
-#define MLXSW_SP2_FWREV_SUBMINOR 2308
+#define MLXSW_SP2_FWREV_SUBMINOR 2714
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -197,6 +195,10 @@ struct mlxsw_sp_ptp_ops {
u64 *data, int data_index);
};
+struct mlxsw_sp_span_ops {
+ u32 (*buffsize_get)(int mtu, u32 speed);
+};
+
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
u16 component_index, u32 *p_max_size,
u8 *p_align_bits, u16 *p_max_write_size)
@@ -423,13 +425,12 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
- rev->major, rev->minor, rev->subminor);
+ dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
fw_filename);
@@ -860,23 +861,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
-
if (eth_skb_pad(skb)) {
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
@@ -1215,6 +1210,9 @@ static void update_stats_cache(struct work_struct *work)
periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+ * necessary when port goes down.
+ */
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
@@ -1796,6 +1794,10 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_PRIO:
return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3539,6 +3541,27 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
+{
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_oper;
+ int err;
+
+ port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
+ port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
+ mlxsw_sp_port->local_port, 0,
+ false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
+ &eth_proto_oper);
+ *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
+ return 0;
+}
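
A sketch of how this helper is meant to feed the mirroring-buffer sizing added below; mlxsw_sp and mlxsw_sp_port are assumed in scope, and a failed query falls back to 0:

	u32 speed, buffsize;

	if (mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed))
		speed = 0;
	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp,
					      mlxsw_sp_port->dev->mtu, speed);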
+
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight)
@@ -3556,7 +3579,7 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate)
+ u8 next_index, u32 maxrate, u8 burst_size)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char qeec_pl[MLXSW_REG_QEEC_LEN];
@@ -3565,6 +3588,7 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
next_index);
mlxsw_reg_qeec_mase_set(qeec_pl, true);
mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
@@ -3602,26 +3626,25 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* one subgroup, all of which are members of the same group.
*/
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
- 0);
+ MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, false, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ MLXSW_REG_QEEC_HR_TC, i, i,
false, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
true, 100);
if (err)
@@ -3633,30 +3656,30 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* for the initial configuration.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_HR_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
@@ -3664,7 +3687,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
/* Configure the min shaper for multicast TCs. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
MLXSW_REG_QEEC_MIS_MIN);
if (err)
@@ -3888,6 +3911,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
+ INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
+ mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
@@ -3944,6 +3969,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -4324,6 +4350,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
}
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
@@ -4342,9 +4377,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
+ mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
}
}
@@ -4540,10 +4577,16 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -4882,6 +4925,33 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
+static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
+ .buffsize_get = mlxsw_sp1_span_buffsize_get,
+};
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
+{
+ return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
+ .buffsize_get = mlxsw_sp2_span_buffsize_get,
+};
+
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+ u32 buffsize = mlxsw_sp->span_ops->buffsize_get(mtu, speed);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
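
Worked numbers for the Spectrum-2 formula; values are illustrative, and speed is assumed to be in Mbps, as the PTYS-derived values are:

	int mtu = 1500;
	u32 speed = 100000;				/* 100 Gbps */
	u32 bytes = 3 * mtu + 38 * speed / 1000;	/* = 8300 bytes */

	/* mlxsw_sp_span_buffsize_get() then converts bytes to cells and
	 * adds one cell of headroom.
	 */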
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -5103,8 +5173,10 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5128,6 +5200,31 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5634,7 +5731,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp2_init,
+ .init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 347bec9d1ecf..a0f1f9dceec5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -140,6 +140,7 @@ struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_span_ops;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -185,8 +186,10 @@ struct mlxsw_sp {
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
+ const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
+ u32 lowest_shaper_bs;
};
static inline struct mlxsw_sp_upper *
@@ -292,6 +295,9 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
+ struct {
+ struct delayed_work speed_update_dw;
+ } span;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -471,6 +477,7 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -481,7 +488,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate);
+ u8 next_index, u32 maxrate, u8 burst_size);
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u8 state);
@@ -501,6 +508,7 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -852,6 +860,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p);
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 150b3a144b83..3d3cca596116 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp_fid *dummy_fid;
struct rhashtable ruleset_ht;
struct list_head rules;
+ struct mutex rules_lock; /* Protects rules list */
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_block_bind;
}
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
struct mlxsw_sp_acl_rule *rule;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&acl->rules_lock);
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return 0;
err_rule_update:
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return err;
}
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
+ mutex_init(&acl->rules_lock);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_acl_ops_init:
+ mutex_destroy(&acl->rules_lock);
mlxsw_sp_fid_put(fid);
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+ mutex_destroy(&acl->rules_lock);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 21296fa7f7fb..49a72a8f1f57 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -160,7 +160,7 @@ static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
u8 weight = ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
if (err) {
netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
@@ -198,7 +198,7 @@ err_port_ets_set:
u8 weight = my_ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
}
return err;
@@ -369,6 +369,17 @@ err_update_qrwe:
}
static int
+mlxsw_sp_port_dcb_app_update_qpdp(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdp_pl[MLXSW_REG_QPDP_LEN];
+
+ mlxsw_reg_qpdp_pack(qpdp_pl, mlxsw_sp_port->local_port, default_prio);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);
+}
+
+static int
mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
struct dcb_ieee_app_dscp_map *map)
{
@@ -405,6 +416,12 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
int err;
default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ err = mlxsw_sp_port_dcb_app_update_qpdp(mlxsw_sp_port, default_prio);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure port default priority\n");
+ return err;
+ }
+
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
@@ -507,9 +524,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- maxrate->tc_maxrate[i]);
+ maxrate->tc_maxrate[i], 0);
if (err) {
netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
goto err_port_ets_maxrate_set;
@@ -523,8 +540,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
err_port_ets_maxrate_set:
for (i--; i >= 0; i--)
mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- i, 0, my_maxrate->tc_maxrate[i]);
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ my_maxrate->tc_maxrate[i], 0);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 6400cd644b7a..a8525992528f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -3,8 +3,10 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
+#include <net/inet_ecn.h>
#include "spectrum_ipip.h"
+#include "reg.h"
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
@@ -338,3 +340,61 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
};
+
+static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tieem_pl[MLXSW_REG_TIEEM_LEN];
+
+ mlxsw_reg_tieem_pack(tieem_pl, inner_ecn, outer_ecn);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tieem), tieem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ int err;
+
+ err = mlxsw_sp_ipip_ecn_encap_init_one(mlxsw_sp, i, outer_ecn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
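
For reference, a sketch of the mapping this loop programs into TIEEM,
assuming INET_ECN_encapsulate(0, inner) follows RFC 6040 (the inner ECN value
is copied to the outer header, except that CE becomes ECT(0)):

	/* inner -> outer, with 0..3 = Not-ECT, ECT(1), ECT(0), CE */
	static const u8 encap_outer_ecn[] = {
		[INET_ECN_NOT_ECT]	= INET_ECN_NOT_ECT,
		[INET_ECN_ECT_1]	= INET_ECN_ECT_1,
		[INET_ECN_ECT_0]	= INET_ECN_ECT_0,
		[INET_ECN_CE]		= INET_ECN_ECT_0,
	};
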
+
+static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tidem_pl[MLXSW_REG_TIDEM_LEN];
+ bool trap_en, set_ce = false;
+ u8 new_inner_ecn;
+
+ trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+
+ mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i, j, err;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ err = mlxsw_sp_ipip_ecn_decap_init_one(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
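
And the corresponding decap behaviour, derived from __INET_ECN_decapsulate():
the inner ECN is rewritten to CE when an ECT packet arrives with CE in the
outer header, while packets whose inner header is Not-ECT but whose outer
header is ECN-marked are trapped (here to DECAP_ECN0) rather than rewritten:

	/* new inner ECN and trap decision per (outer, inner) pair:
	 *
	 *   outer \ inner  Not-ECT      ECT(1)  ECT(0)  CE
	 *   Not-ECT        keep         keep    keep    keep
	 *   ECT(1)         keep, trap   keep    keep    keep
	 *   ECT(0)         keep, trap   keep    keep    keep
	 *   CE             keep, trap   -> CE   -> CE   keep
	 */
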
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ec2ff3d7f41c..34f7c3501b08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -920,6 +920,7 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
egr_types = 0xff;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
return -ERANGE;
}
@@ -1015,27 +1016,17 @@ mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
{
- const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_oper, speed;
bool ptps = false;
int err, i;
+ u32 speed;
if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
- port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
- port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
- mlxsw_sp_port->local_port, 0,
- false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
if (err)
return err;
- port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
- &eth_proto_oper);
- speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
ptps = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 46d43cfd04e9..79a2801d59f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -18,6 +18,8 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
MLXSW_SP_QDISC_RED,
MLXSW_SP_QDISC_PRIO,
+ MLXSW_SP_QDISC_ETS,
+ MLXSW_SP_QDISC_TBF,
};
struct mlxsw_sp_qdisc_ops {
@@ -195,6 +197,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
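+/* In MC-aware mode, traffic classes 8..15 mirror classes 0..7 and carry the
+ * corresponding multicast traffic, so the helpers below fold the UC and MC
+ * counters together.
+ */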
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->backlog[tclass_num] +
+ xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->tail_drop[tclass_num] +
+ xstats->tail_drop[tclass_num + 8];
+}
+
static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
u8 prio_bitmap, u64 *tx_packets,
@@ -212,6 +228,70 @@ mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
}
}
+static void
+mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 *p_tx_bytes, u64 *p_tx_packets,
+ u64 *p_drops, u64 *p_backlog)
+{
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_port_xstats *xstats;
+ u64 tx_bytes, tx_packets;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+
+ *p_tx_packets += tx_packets;
+ *p_tx_bytes += tx_bytes;
+ *p_drops += xstats->wred_drop[tclass_num] +
+ mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+ *p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
+}
+
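+/* The periodic HW counters are free-running. stats_base snapshots them when
+ * the Qdisc is offloaded, so only the delta accrued since then is reported
+ * to the stack and then folded back into the base.
+ */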
+static void
+mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 tx_bytes, u64 tx_packets,
+ u64 drops, u64 backlog,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes -= stats_base->tx_bytes;
+ tx_packets -= stats_base->tx_packets;
+ drops -= stats_base->drops;
+ backlog -= stats_base->backlog;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
+
+ stats_base->backlog += backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+}
+
+static void
+mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -269,7 +349,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
&stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
- red_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
stats_base->drops = red_base->prob_drop + red_base->pdrop;
@@ -342,19 +422,28 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
{
- struct tc_red_qopt_offload_params *p = params;
u64 backlog;
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ qstats->backlog -= backlog;
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -370,7 +459,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
marks = xstats->ecn - xstats_base->prob_mark;
- pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+ xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
@@ -387,40 +477,21 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops, backlog;
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
+ u64 overlimits;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
- &tx_packets, &tx_bytes);
- tx_bytes = tx_bytes - stats_base->tx_bytes;
- tx_packets = tx_packets - stats_base->tx_packets;
-
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
- drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- stats_base->drops;
- backlog = xstats->backlog[tclass_num];
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
stats_ptr->qstats->overlimits += overlimits;
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
-
- stats_base->backlog = backlog;
- stats_base->drops += drops;
stats_base->overlimits += overlimits;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+
return 0;
}
@@ -470,15 +541,215 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+static void
+mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ u64 backlog_cells = 0;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog_cells);
+
+ mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
+ mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
+ mlxsw_sp_qdisc->stats_base.drops = drops;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
static int
-mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
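+	/* Writing MLXSW_REG_QEEC_MAS_DIS as the maximum rate disables the
+	 * subgroup shaper again.
+	 */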
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+}
+
+static int
+mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 max_size, u8 *p_burst_size)
+{
+ /* TBF burst size is configured in bytes. The ASIC burst size value is
+	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
+ */
+ u32 bs512 = max_size / 64;
+ u8 bs = fls(bs512);
+
+ if (!bs)
+ return -EINVAL;
+ --bs;
+
+ /* Demand a power of two. */
+ if ((1 << bs) != bs512)
+ return -EINVAL;
+
+ if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
+ bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
+ return -EINVAL;
+
+ *p_burst_size = bs;
+ return 0;
+}
+
+static u32
+mlxsw_sp_qdisc_tbf_max_size(u8 bs)
+{
+ return (1U << bs) * 64;
+}
+
+static u64
+mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
+{
+ /* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
+ * Kbits/s.
+ */
+	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
+}
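
A worked example of the two conversions above, with hypothetical numbers:

	/* TBF configured with burst 131072 bytes and rate 12500000 bytes/s:
	 *
	 *   bs512 = 131072 / 64 = 2048; fls(2048) = 12, so bs = 11, and
	 *   1 << 11 == 2048 is a power of two, so the ASIC burst exponent is
	 *   11, i.e. (2 ^ 11) * 512 bits = 131072 bytes, as requested.
	 *   A burst of 10000 bytes would give bs512 = 156, which is not a
	 *   power of two and would be rejected with -EINVAL.
	 *
	 *   rate_kbps = 12500000 / 1000 * 8 = 100000, i.e. 100 Mbit/s.
	 */
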
+
+static int
+mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
+ dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: rate of %lluKbps must be below %u\n",
+ rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (err) {
+ u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
+
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
+ p->max_size,
+ mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
+ mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (WARN_ON_ONCE(err))
+ /* check_params above was supposed to reject this value. */
+ return -EINVAL;
+
+	/* Configure the subgroup shaper, so that both UC and MC traffic is
+	 * subject to shaping. This is unlike RED, where subgroup-level
+	 * configuration would not be applicable, because UC queue lengths
+	 * will differ from MC ones due to different pool and quota
+	 * configurations. For a shaper, on the other hand, subjecting the
+	 * overall stream to the configured rate makes sense, and it is also
+	 * what ieee_setmaxrate() does.
+ */
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ rate_kbps, burst_size);
+}
+
+static void
+mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
+ .type = MLXSW_SP_QDISC_TBF,
+ .check_params = mlxsw_sp_qdisc_tbf_check_params,
+ .replace = mlxsw_sp_qdisc_tbf_replace,
+ .unoffload = mlxsw_sp_qdisc_tbf_unoffload,
+ .destroy = mlxsw_sp_qdisc_tbf_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_tbf_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_tbf,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_TBF))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_TBF_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_TBF_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
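
A hypothetical tc invocation that would reach this handler, with the TBF
attached under an already-offloaded root Qdisc (the burst and rate values are
chosen to satisfy the checks above):

	/* # tc qdisc replace dev swp1 parent 1:8 handle 108: \
	 *	tbf rate 100mbit burst 128k limit 1m
	 */
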
+
+static int
+__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
&mlxsw_sp_port->tclass_qdiscs[i]);
mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
@@ -488,36 +759,58 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct tc_prio_qopt_offload_params *p = params;
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
- if (p->bands > IEEE_8021QAZ_MAX_TCS)
+static int
+__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
+{
+ if (nbands > IEEE_8021QAZ_MAX_TCS)
return -EOPNOTSUPP;
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct tc_prio_qopt_offload_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned int nbands,
+ const unsigned int *quanta,
+ const unsigned int *weights,
+ const u8 *priomap)
+{
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
int err;
- for (band = 0; band < p->bands; band++) {
+ for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, !!quanta[band],
+ weights[band]);
+ if (err)
+ return err;
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (p->priomap[i] == band) {
+ if (priomap[i] == band) {
child_qdisc->prio_bitmap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
@@ -540,21 +833,46 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, false, 0);
}
return 0;
}
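+/* PRIO is offloaded as a degenerate ETS: all-zero quanta leave dwrr disabled
+ * on every subgroup element, making all bands strict, which matches PRIO
+ * semantics.
+ */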
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ zeroes, zeroes, p->priomap);
+}
+
+static void
+__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
+{
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ qstats->backlog -= backlog;
+}
+
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- u64 backlog;
- backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
}
static int
@@ -562,37 +880,23 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
- struct mlxsw_sp_qdisc_stats *stats_base;
- struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
+ struct mlxsw_sp_qdisc *tc_qdisc;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
int i;
- xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
- stats_base = &mlxsw_sp_qdisc->stats_base;
-
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
-
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- drops += xstats->tail_drop[i];
- drops += xstats->wred_drop[i];
- backlog += xstats->backlog[i];
+ tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
}
- drops = drops - stats_base->drops;
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
- stats_base->backlog = backlog;
- stats_base->drops += drops;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
return 0;
}
@@ -614,7 +918,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->drops = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
stats_base->drops += xstats->wred_drop[i];
}
@@ -631,27 +935,93 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
-/* Grafting is not supported in mlxsw. It will result in un-offloading of the
- * grafted qdisc as well as the qdisc in the qdisc new location.
- * (However, if the graft is to the location where the qdisc is already at, it
- * will be ignored completely and won't cause un-offloading).
+static int
+mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ p->quanta, p->weights, p->priomap);
+}
+
+static void
+mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
+ .type = MLXSW_SP_QDISC_ETS,
+ .check_params = mlxsw_sp_qdisc_ets_check_params,
+ .replace = mlxsw_sp_qdisc_ets_replace,
+ .unoffload = mlxsw_sp_qdisc_ets_unoffload,
+ .destroy = mlxsw_sp_qdisc_ets_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
+ * graph is free of cycles). These operations do not change the parent handle
+ * though, which means it can be incomplete (if there is more than one class
+ * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
+ * linked to a different class and then removed from the original class).
+ *
+ * E.g. consider this sequence of operations:
+ *
+ * # tc qdisc add dev swp1 root handle 1: prio
+ * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
+ * RED: set bandwidth to 10Mbit
+ * # tc qdisc link dev swp1 handle 13: parent 1:2
+ *
+ * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
+ * child. But RED will still only claim that 1:3 is its parent. If it's removed
+ * from that band, its only parent will be 1:2, but it will continue to claim
+ * that it is in fact 1:3.
+ *
+ * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
+ * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
+ * notification to offload the child Qdisc, based on its parent handle, and use
+ * the graft operation to validate that the class where the child is actually
+ * grafted corresponds to the parent handle. If the two don't match, we
+ * unoffload the child.
*/
static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
+__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
- int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
- /* Check if the grafted qdisc is already in its "new" location. If so -
- * nothing needs to be done.
- */
- if (p->band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ if (band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
- if (!p->child_handle) {
+ if (!child_handle) {
/* This is an invisible FIFO replacing the original Qdisc.
* Ignore it--the original Qdisc's destroy will follow.
*/
@@ -662,7 +1032,7 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
* unoffload it.
*/
old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
- p->child_handle);
+ child_handle);
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
@@ -671,6 +1041,15 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->band, p->child_handle);
+}
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
@@ -704,6 +1083,40 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_ETS_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_ets,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_ETS))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_ETS_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_ETS_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_ETS_GRAFT:
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
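
And a hypothetical ETS configuration that this handler can offload, with two
strict bands followed by six DWRR bands (the quanta are translated into DWRR
weights for the subgroup ETS elements):

	/* # tc qdisc replace dev swp1 root handle 1: \
	 *	ets bands 8 strict 2 quanta 1000 1200 1400 1600 1800 2000 \
	 *	priomap 7 6 5 4 3 2 1 0
	 */
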
+
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 8290e82240fc..ce707723f8cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -382,9 +382,10 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib_entry;
struct mlxsw_sp_fib_node {
- struct list_head entry_list;
+ struct mlxsw_sp_fib_entry *fib_entry;
struct list_head list;
struct rhash_head ht_node;
struct mlxsw_sp_fib *fib;
@@ -397,7 +398,6 @@ struct mlxsw_sp_fib_entry_decap {
};
struct mlxsw_sp_fib_entry {
- struct list_head list;
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
@@ -1162,7 +1162,6 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
const union mlxsw_sp_l3addr *addr,
enum mlxsw_sp_fib_entry_type type)
{
- struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
unsigned char addr_prefix_len;
struct mlxsw_sp_fib *fib;
@@ -1191,15 +1190,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
addr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
+ if (!fib_node || fib_node->fib_entry->type != type)
return NULL;
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != type)
- return NULL;
-
- return fib_entry;
+ return fib_node->fib_entry;
}
/* Given an IPIP entry, find the corresponding decap route. */
@@ -1209,7 +1203,6 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
{
	struct mlxsw_sp_fib_node *fib_node;
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_fib_entry *fib_entry;
unsigned char saddr_prefix_len;
union mlxsw_sp_l3addr saddr;
struct mlxsw_sp_fib *ul_fib;
@@ -1244,15 +1237,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
saddr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
+ if (!fib_node ||
+ fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
return NULL;
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
- return NULL;
-
- return fib_entry;
+ return fib_node->fib_entry;
}
static struct mlxsw_sp_ipip_entry *
@@ -3231,10 +3220,6 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry);
-
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -3243,9 +3228,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
return err;
@@ -3253,24 +3235,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err);
-
-static void
-mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
-{
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- struct mlxsw_sp_fib_entry *fib_entry;
-
- list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- }
-}
-
static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
{
/* Valid sizes for an adjacency group are:
@@ -3374,6 +3338,73 @@ mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
}
}
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
+
+static void
+mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (nh->offloaded)
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct mlxsw_sp_nexthop *nh;
+
+ nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
+ if (nh && nh->offloaded)
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+
+ /* Unfortunately, in IPv6 the route and the nexthop are described by
+ * the same struct, so we need to iterate over all the routes using the
+ * nexthop group and set / clear the offload indication for them.
+ */
+ list_for_each_entry(fib6_entry, &nh_grp->fib_list,
+ common.nexthop_group_node)
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+}
+
+static void
+mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
+ case AF_INET:
+ mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ case AF_INET6:
+ mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ }
+}
+
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -3447,6 +3478,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+
if (!old_adj_index_valid) {
/* The trap was set for fib entries, so we have to call
* fib entry update to unset it and use adjacency index.
@@ -3468,9 +3501,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- /* Offload state within the group changed, so update the flags. */
- mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
-
return;
set_trap:
@@ -3483,6 +3513,7 @@ set_trap:
err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
nh_grp->ecmp_size, nh_grp->adj_index);
@@ -3845,7 +3876,7 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
key.fib_nh = fib_nh;
nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
- if (WARN_ON_ONCE(!nh))
+ if (!nh)
return;
switch (event) {
@@ -4065,131 +4096,128 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
}
static void
-mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
+ bool should_offload;
- if (nh->offloaded)
- nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = should_offload;
+ fri.trap = !should_offload;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (!list_is_singular(&nh_grp->fib_list))
- return;
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = false;
+ fri.trap = false;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ bool should_offload;
+
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
- list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- struct mlxsw_sp_nexthop *nh;
-
- nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
- if (nh && nh->offloaded)
- fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
+ !should_offload);
}
static void
-mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct fib6_info *rt = mlxsw_sp_rt6->rt;
-
- rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
}
-static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_set(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_set(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err)
+mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
- return mlxsw_sp_fib_entry_offload_unset(fib_entry);
case MLXSW_REG_RALUE_OP_WRITE_WRITE:
- if (err)
- return;
- if (mlxsw_sp_fib_entry_should_offload(fib_entry))
- mlxsw_sp_fib_entry_offload_set(fib_entry);
- else
- mlxsw_sp_fib_entry_offload_unset(fib_entry);
- return;
+ mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
default:
- return;
+ break;
}
}
@@ -4416,7 +4444,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
return err;
}
@@ -4491,6 +4522,19 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
+ break;
+ default:
+ break;
+ }
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -4523,6 +4567,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
err_nexthop4_group_get:
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
kfree(fib4_entry);
return ERR_PTR(err);
@@ -4532,6 +4577,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
kfree(fib4_entry);
}
@@ -4555,15 +4601,14 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
- fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi) {
- return fib4_entry;
- }
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == fen_info->tb_id &&
+ fib4_entry->tos == fen_info->tos &&
+ fib4_entry->type == fen_info->type &&
+ mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
+ fen_info->fi)
+ return fib4_entry;
return NULL;
}
@@ -4611,7 +4656,6 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
if (!fib_node)
return NULL;
- INIT_LIST_HEAD(&fib_node->entry_list);
list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
@@ -4622,18 +4666,9 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
list_del(&fib_node->list);
- WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry)
-{
- return list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list) == fib_entry;
-}
-
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
@@ -4773,200 +4808,48 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_vr *vr = fib_node->fib->vr;
- if (!list_empty(&fib_node->entry_list))
+ if (fib_node->fib_entry)
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static struct mlxsw_sp_fib4_entry *
-mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id > new4_entry->tb_id)
- continue;
- if (fib4_entry->tb_id != new4_entry->tb_id)
- break;
- if (fib4_entry->tos > new4_entry->tos)
- continue;
- if (fib4_entry->prio >= new4_entry->prio ||
- fib4_entry->tos < new4_entry->tos)
- return fib4_entry;
- }
-
- return NULL;
-}
-
-static int
-mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
- struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib_node *fib_node;
-
- if (WARN_ON(!fib4_entry))
- return -EINVAL;
-
- fib_node = fib4_entry->common.fib_node;
- list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
- common.list) {
- if (fib4_entry->tb_id != new4_entry->tb_id ||
- fib4_entry->tos != new4_entry->tos ||
- fib4_entry->prio != new4_entry->prio)
- break;
- }
-
- list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
- return 0;
-}
-
-static int
-mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
- bool replace, bool append)
-{
- struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
-
- if (append)
- return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
- if (replace && WARN_ON(!fib4_entry))
- return -EINVAL;
-
- /* Insert new entry before replaced one, so that we can later
- * remove the second.
- */
- if (fib4_entry) {
- list_add_tail(&new4_entry->common.list,
- &fib4_entry->common.list);
- } else {
- struct mlxsw_sp_fib4_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- if (new4_entry->tb_id > last->tb_id)
- break;
- fib4_entry = last;
- }
-
- if (fib4_entry)
- list_add(&new4_entry->common.list,
- &fib4_entry->common.list);
- else
- list_add(&new4_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
-{
- list_del(&fib4_entry->common.list);
-}
-
-static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return 0;
-
- /* To prevent packet loss, overwrite the previously offloaded
- * entry.
- */
- if (!list_is_singular(&fib_node->entry_list)) {
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
-
- mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
- }
-
- return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
-}
-
-static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
+static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return;
-
- /* Promote the next entry by overwriting the deleted entry */
- if (!list_is_singular(&fib_node->entry_list)) {
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
-
- mlxsw_sp_fib_entry_update(mlxsw_sp, n);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- return;
- }
-
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
-}
-
-static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace, bool append)
-{
int err;
- err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
- if (err)
- return err;
+ fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
return 0;
-err_fib_node_entry_add:
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
+err_fib_entry_update:
+ fib_node->fib_entry = NULL;
return err;
}
static void
-mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry)
-{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
-
- if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
- mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
-}
-
-static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace)
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *replaced;
-
- if (!replace)
- return;
-
- /* We inserted the new entry before replaced one */
- replaced = list_next_entry(fib4_entry, common.list);
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ fib_node->fib_entry = NULL;
}
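
The link/unlink pair above is the heart of this refactor: a fib node now owns at most one fib entry, so linking reduces to a pointer store plus a hardware update with rollback. A minimal compilable sketch of that contract, using local stand-in types rather than the driver's structs (hw_update() is a stub for mlxsw_sp_fib_entry_update()):

#include <stddef.h>

struct fib_entry;

struct fib_node {
	struct fib_entry *fib_entry;	/* at most one offloaded entry */
};

/* stand-in for mlxsw_sp_fib_entry_update(); always succeeds here */
static int hw_update(struct fib_entry *entry)
{
	(void)entry;
	return 0;
}

static int node_entry_link(struct fib_node *node, struct fib_entry *entry)
{
	int err;

	node->fib_entry = entry;	/* publish the new entry ... */
	err = hw_update(entry);		/* ... then program the device */
	if (err)
		node->fib_entry = NULL;	/* roll back on failure */
	return err;
}
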
static int
-mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info,
- bool replace, bool append)
+mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
int err;
@@ -4989,18 +4872,26 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib4_entry_create;
}
- err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
- append);
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
- goto err_fib4_node_entry_link;
+ goto err_fib_node_entry_link;
}
- mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
+ common);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
return 0;
-err_fib4_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
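
The replace path above is make-before-break: the previous entry is snapshotted, the new one is linked (overwriting the old route in hardware), and only then is the old entry freed; on a link failure the snapshot is restored. A sketch of that ordering, reusing the stand-ins from the sketch above plus a hypothetical free_entry() stub:

static void free_entry(struct fib_entry *entry)	/* stub */
{
	(void)entry;
}

static int replace_entry(struct fib_node *node, struct fib_entry *new_entry)
{
	struct fib_entry *old = node->fib_entry;	/* may be NULL */
	int err;

	err = node_entry_link(node, new_entry);	/* overwrites old in HW */
	if (err) {
		node->fib_entry = old;		/* restore the snapshot */
		return err;
	}
	if (old)
		free_entry(old);	/* break only after make succeeded */
	return 0;
}
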
@@ -5021,7 +4912,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
return;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5083,13 +4974,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
kfree(mlxsw_sp_rt6);
}
-static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
-{
- /* RTF_CACHE routes are ignored */
- return !(rt->fib6_flags & RTF_ADDRCONF) &&
- rt->fib6_nh->fib_nh_gw_family;
-}
-
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
@@ -5097,37 +4981,6 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
list)->rt;
}
-static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
- return NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
- * virtual router.
- */
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (rt->fib6_metric < nrt->fib6_metric)
- continue;
- if (rt->fib6_metric == nrt->fib6_metric &&
- mlxsw_sp_fib6_rt_can_mp(rt))
- return fib6_entry;
- if (rt->fib6_metric > nrt->fib6_metric)
- break;
- }
-
- return NULL;
-}
-
static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
const struct fib6_info *rt)
@@ -5313,6 +5166,11 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
&nh_grp->fib_list);
fib6_entry->common.nh_group = nh_grp;
+ /* The route and the nexthop are described by the same struct, so we
+ * need to update the nexthop offload indication for the new route.

+ */
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+
return 0;
}
@@ -5345,16 +5203,16 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
if (list_empty(&old_nh_grp->fib_list))
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
return 0;
-err_fib_node_entry_add:
+err_fib_entry_update:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
list_add_tail(&fib6_entry->common.nexthop_group_node,
@@ -5519,112 +5377,13 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (replace && rt->fib6_metric == nrt->fib6_metric) {
- if (mlxsw_sp_fib6_rt_can_mp(rt) ==
- mlxsw_sp_fib6_rt_can_mp(nrt))
- return fib6_entry;
- if (mlxsw_sp_fib6_rt_can_mp(nrt))
- fallback = fallback ?: fib6_entry;
- }
- if (rt->fib6_metric > nrt->fib6_metric)
- return fallback ?: fib6_entry;
- }
-
- return fallback;
-}
-
-static int
-mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
- bool *p_replace)
-{
- struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
- struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
-
- if (*p_replace && !fib6_entry)
- *p_replace = false;
-
- if (fib6_entry) {
- list_add_tail(&new6_entry->common.list,
- &fib6_entry->common.list);
- } else {
- struct mlxsw_sp_fib6_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
-
- if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
- break;
- fib6_entry = last;
- }
-
- if (fib6_entry)
- list_add(&new6_entry->common.list,
- &fib6_entry->common.list);
- else
- list_add(&new6_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- list_del(&fib6_entry->common.list);
-}
-
-static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool *p_replace)
-{
- int err;
-
- err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
- if (err)
- return err;
-
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
- if (err)
- goto err_fib_node_entry_add;
-
- return 0;
-
-err_fib_node_entry_add:
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
- return err;
-}
-
-static void
-mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
-}
-
-static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct mlxsw_sp_fib *fib;
+ struct fib6_info *cmp_rt;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
@@ -5638,40 +5397,23 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
- rt->fib6_metric == iter_rt->fib6_metric &&
- mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
- return fib6_entry;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == cmp_rt->fib6_metric &&
+ mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
+ return fib6_entry;
return NULL;
}
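
With one entry per node, the lookup above recovers the IPv6 wrapper from the embedded common entry via container_of(). A self-contained sketch of that embedding, using the classic offsetof-based form (simplified relative to the kernel's typeof-checked macro):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct common_entry { int type; };

struct fib6_entry {
	int nrt6;			/* wrapper-specific state */
	struct common_entry common;	/* embedded base entry */
};

static struct fib6_entry *to_fib6(struct common_entry *e)
{
	return container_of(e, struct fib6_entry, common);
}
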
-static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool replace)
-{
- struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
- struct mlxsw_sp_fib6_entry *replaced;
-
- if (!replace)
- return;
-
- replaced = list_next_entry(fib6_entry, common.list);
-
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
-}
-
-static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6, bool replace)
+static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
- struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
int err;
@@ -5693,18 +5435,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);
- /* Before creating a new entry, try to append route to an existing
- * multipath entry.
- */
- fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
- if (fib6_entry) {
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
- rt_arr, nrt6);
- if (err)
- goto err_fib6_entry_nexthop_add;
- return 0;
- }
-
fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
nrt6);
if (IS_ERR(fib6_entry)) {
@@ -5712,17 +5442,70 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib6_entry_create;
}
- err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib6_node_entry_link;
+ goto err_fib_node_entry_link;
+
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
- mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
+ common);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
return 0;
-err_fib6_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
+static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+ int err;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ if (rt->fib6_src.plen)
+ return -EINVAL;
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib_node))
+ return PTR_ERR(fib_node);
+
+ if (WARN_ON_ONCE(!fib_node->fib_entry)) {
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return -EINVAL;
+ }
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ if (err)
+ goto err_fib6_entry_nexthop_add;
+
+ return 0;
+
err_fib6_entry_nexthop_add:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
return err;
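
Append is now strictly additive: the node must already hold an entry (hence the WARN_ON_ONCE above), and the operation only grows that entry's nexthop set. A stub-level sketch of the contract, continuing the same stand-in types (add_nexthops() stands in for mlxsw_sp_fib6_entry_nexthop_add()):

#include <errno.h>

/* stand-in for mlxsw_sp_fib6_entry_nexthop_add() */
static int add_nexthops(struct fib_entry *entry, unsigned int n)
{
	(void)entry;
	(void)n;
	return 0;
}

static int append_nexthops(struct fib_node *node, unsigned int n)
{
	if (!node->fib_entry)	/* nothing to append to */
		return -EINVAL;
	return add_nexthops(node->fib_entry, n);
}
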
@@ -5762,7 +5545,7 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5916,39 +5699,25 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
-
- list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- /* Break when entry list is empty and node was freed.
- * Otherwise, we'll access freed memory in the next
- * iteration.
- */
- if (do_break)
- break;
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
-
- list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib6_entry *fib6_entry;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -6099,7 +5868,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace, append;
int err;
/* Protect internal structures from changes */
@@ -6107,13 +5875,9 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
- err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
- replace, append);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
fib_info_put(fib_work->fen_info.fi);
@@ -6138,20 +5902,24 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace;
int err;
rtnl_lock();
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6,
- replace);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
+ if (err)
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
@@ -6216,8 +5984,6 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
@@ -6245,7 +6011,7 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
@@ -6348,9 +6114,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
return notifier_from_errno(err);
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND:
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
return notifier_from_errno(-EINVAL);
@@ -8025,8 +7791,18 @@ mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
+ int err;
+
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
+
+ err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
+ if (err)
+ return err;
+
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
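
The init path above uses the usual early-return chain: each step either succeeds fully or aborts the whole init with its errno, and the last step's result becomes the function's result. A sketch with stubbed steps:

static int step_a(void) { return 0; }	/* e.g. ECN encap init */
static int step_b(void) { return 0; }	/* e.g. ECN decap init */
static int step_c(void) { return 0; }	/* e.g. TIGCR config */

static int chained_init(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		return err;
	return step_c();	/* last step's result is ours */
}
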
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index cc1de91e8217..c9b94f435cdd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -104,4 +104,7 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
return !memcmp(addr1, addr2, sizeof(*addr1));
}
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 200d324e6d99..0cdd7954a085 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -748,33 +748,50 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
return false;
}
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
+static int
+mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ u32 buffsize;
+ u32 speed;
+ int err;
+
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
+ if (err)
+ return err;
+ if (speed == SPEED_UNKNOWN)
+ speed = 0;
+
+ buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
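
The removed helper sized the egress-mirror buffer from the MTU alone, as bytes-to-cells(mtu * 5 / 2) + 1; the replacement also folds in the port speed via mlxsw_sp_span_buffsize_get(), which is not shown in this hunk. A sketch of the retired MTU-only heuristic, with a hypothetical per-ASIC cell size as the conversion factor:

/* round-up byte-to-cell conversion with a hypothetical cell size */
static unsigned int bytes_to_cells(unsigned int bytes, unsigned int cell_bytes)
{
	return (bytes + cell_bytes - 1) / cell_bytes;
}

static unsigned int mtu_to_buffsize_cells(unsigned int mtu,
					  unsigned int cell_bytes)
{
	/* 2.5x the MTU, rounded up to cells, plus one spare cell */
	return bytes_to_cells(mtu * 5 / 2, cell_bytes) + 1;
}
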
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value.
*/
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+ if (mlxsw_sp_span_is_egress_mirror(port))
+ return mlxsw_sp_span_port_buffsize_update(port, mtu);
+ return 0;
+}
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
+void mlxsw_sp_span_speed_update_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_port *mlxsw_sp_port;
- return 0;
+ mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+ span.speed_update_dw);
+
+ /* If port is egress mirrored, the shared buffer size should be
+ * updated according to the speed value.
+ */
+ if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
+ mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
}
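
The handler above uses the standard delayed-work idiom: the port structure embeds a struct delayed_work, and the handler recovers the owning port with to_delayed_work() plus container_of(). A sketch against the real workqueue API (kernel context assumed, so not buildable standalone; struct and function names are illustrative):

#include <linux/workqueue.h>

struct my_port {
	struct delayed_work speed_update_dw;
	/* ... */
};

static void my_speed_update(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_port *port =
		container_of(dwork, struct my_port, speed_update_dw);

	(void)port;	/* re-size mirror buffers for the new speed here */
}

/* at init:       INIT_DELAYED_WORK(&port->speed_update_dw, my_speed_update);
 * on link event: mod_delayed_work(system_wq, &port->speed_update_dw, 0);
 */
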
static struct mlxsw_sp_span_inspected_port *
@@ -836,15 +853,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
+ if (err)
return err;
- }
}
if (bind) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 5e04252f2a11..59724335525f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -74,5 +74,6 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry);
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+void mlxsw_sp_span_speed_update_work(struct work_struct *work);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index e0d7c49ffae0..60205aa3f6a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -9,6 +9,20 @@
#include "reg.h"
#include "spectrum.h"
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/mlxsw.rst
+ */
+enum {
+ DEVLINK_MLXSW_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+};
+
+#define DEVLINK_MLXSW_TRAP_NAME_IRIF_DISABLED \
+ "irif_disabled"
+#define DEVLINK_MLXSW_TRAP_NAME_ERIF_DISABLED \
+ "erif_disabled"
+
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
@@ -21,6 +35,12 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
+ DEVLINK_MLXSW_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
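
Driver-specific trap IDs are allocated above DEVLINK_TRAP_GENERIC_ID_MAX, so the generic and driver namespaces can never collide; the matching DEVLINK_TRAP_DRIVER() entries then carry an explicit name string instead of a generated one. A sketch of the ID convention (enum names here are illustrative, not from the driver):

enum {
	MY_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
	MY_TRAP_ID_FOO,		/* first driver-specific ID */
	MY_TRAP_ID_BAR,
};
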
#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
@@ -58,6 +78,11 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -90,6 +115,15 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, ROUTER_EXP, TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -123,6 +157,13 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -304,8 +345,9 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
is_bytes = false;
@@ -342,6 +384,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index de6cb22f68b1..f0e98ec8f1ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way, so we shouldn't count its
* bytes as being sent.
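
The conversion above swaps the realloc-and-free headroom dance for skb_cow_head(), which guarantees writable headroom in place and returns 0 or a negative errno. A sketch of the resulting xmit prologue (HDR_LEN stands in for the driver's TX-header length; kernel context assumed):

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_cow_head(skb, HDR_LEN)) {
		dev_kfree_skb_any(skb);	/* drop and account as tx_dropped */
		return NETDEV_TX_OK;	/* the skb was consumed either way */
	}
	/* ... push and fill the TX header, then queue to hardware ... */
	return NETDEV_TX_OK;
}
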
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 0c1c142bb6b0..12e1fa998d42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -67,6 +67,7 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
@@ -80,12 +81,20 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_CLASS_E = 0x164,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_MC_DMAC = 0x168,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_DIP = 0x169,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_DEC_PKT = 0x188,
+ MLXSW_TRAP_ID_DISCARD_OVERLAY_SMAC_MC = 0x190,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index da329ca115cc..f3f6dfe3eddc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1103,7 +1103,7 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
__ks8842_start_new_rx_dma(netdev);
}
-static void ks8842_tx_timeout(struct net_device *netdev)
+static void ks8842_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
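
This and the following hunks all track the same core change: ndo_tx_timeout gained a txqueue argument identifying the stalled queue, letting multi-queue drivers reset only the affected ring; single-queue drivers such as these can simply ignore it. A sketch of the new signature (function name is illustrative):

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_err(dev, "TX timeout on queue %u\n", txqueue);
	/* single-queue hardware: schedule a full reset regardless */
}
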
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e102e1560ac7..d1444ba36e10 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4896,7 +4896,7 @@ unlock:
* triggered to free up resources so that the transmit routine can continue
* sending out packets. The hardware is reset to correct the problem.
*/
-static void netdev_tx_timeout(struct net_device *dev)
+static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
static unsigned long last_reset;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 0567e4f387a5..09cdc2f2e7ff 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1325,7 +1325,7 @@ static irqreturn_t enc28j60_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void enc28j60_tx_timeout(struct net_device *ndev)
+static void enc28j60_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct enc28j60_net *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 52c41d11f565..39925e4bf2ec 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -892,7 +892,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
}
/* Deal with a transmit timeout */
-static void encx24j600_tx_timeout(struct net_device *dev)
+static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct encx24j600_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index afe52463dc57..9399f6a98748 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1265,6 +1265,9 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ret = -ERANGE;
+ break;
default:
netif_warn(adapter, drv, adapter->netdev,
" tx_type = %d, UNKNOWN\n", config.tx_type);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 985b46d7e3d1..86d543ab1ab9 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -500,13 +500,14 @@ EXPORT_SYMBOL(ocelot_port_enable);
static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
int err;
if (priv->serdes) {
err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
@@ -514,7 +515,7 @@ static int ocelot_port_open(struct net_device *dev)
}
err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index c259114c48fd..04372ba72fec 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
-#include "ocelot_ana.h"
-#include "ocelot_dev.h"
-#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
@@ -68,7 +68,6 @@ struct ocelot_port_private {
u8 vlan_aware;
- phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h
deleted file mode 100644
index 841c6ec22b64..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ana.h
+++ /dev/null
@@ -1,625 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_ANA_H_
-#define _MSCC_OCELOT_ANA_H_
-
-#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
-#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
-#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
-#define ANA_ANAGEFIL_PID_EN BIT(19)
-#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
-#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
-#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
-#define ANA_ANAGEFIL_VID_EN BIT(13)
-#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
-#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
-
-#define ANA_STORMLIMIT_CFG_RSZ 0x4
-
-#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
-#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
-#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
-#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
-#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
-
-#define ANA_AUTOAGE_AGE_FAST BIT(21)
-#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
-#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
-#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
-#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
-
-#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
-#define ANA_MACTOPTIONS_SHADOW BIT(0)
-
-#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
-#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
-#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
-#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
-#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
-#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
-#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
-#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
-#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
-#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
-#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
-#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
-#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
-#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
-
-#define ANA_FLOODING_RSZ 0x4
-
-#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
-#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
-#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
-
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
-
-#define ANA_SFLOW_CFG_RSZ 0x4
-
-#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
-#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
-#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
-
-#define ANA_PORT_MODE_RSZ 0x4
-
-#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
-
-#define ANA_CUT_THRU_CFG_RSZ 0x4
-
-#define ANA_PGID_PGID_RSZ 0x4
-
-#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
-#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
-#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
-#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
-
-#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
-#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
-#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
-#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
-#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
-
-#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
-#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
-#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
-#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
-#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
-#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
-
-#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
-#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
-#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
-#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
-#define ANA_TABLES_MACACCESS_VALID BIT(11)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
-#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
-#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
-#define MACACCESS_CMD_IDLE 0
-#define MACACCESS_CMD_LEARN 1
-#define MACACCESS_CMD_FORGET 2
-#define MACACCESS_CMD_AGE 3
-#define MACACCESS_CMD_GET_NEXT 4
-#define MACACCESS_CMD_INIT 5
-#define MACACCESS_CMD_READ 6
-#define MACACCESS_CMD_WRITE 7
-
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
-#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
-#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
-#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
-
-#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
-#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
-#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
-#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
-#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
-#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
-#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
-#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
-
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
-#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
-
-#define ANA_TABLES_ENTRYLIM_RSZ 0x4
-
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
-
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
-#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
-#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
-#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
-#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
-#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
-#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
-
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
-
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
-
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
-#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
-#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
-#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
-#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
-#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
-#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
-#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
-
-#define ANA_MSTI_STATE_RSZ 0x4
-
-#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
-
-#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
-#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
-#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
-
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
-
-#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
-
-#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
-#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
-#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
-
-#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
-
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
-#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
-#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
-#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
-
-#define ANA_PORT_VLAN_CFG_GSZ 0x100
-
-#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
-#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
-#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
-#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
-
-#define ANA_PORT_DROP_CFG_GSZ 0x100
-
-#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
-#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
-#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
-#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
-#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
-
-#define ANA_PORT_QOS_CFG_GSZ 0x100
-
-#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
-#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
-#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
-#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
-
-#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
-#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
-
-#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
-
-#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
-#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
-#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
-#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_PORT_CFG_GSZ 0x100
-
-#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
-#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
-#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
-#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
-#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
-#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
-#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
-#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
-#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
-#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
-#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
-
-#define ANA_PORT_POL_CFG_GSZ 0x100
-
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
-#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
-#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
-
-#define ANA_PORT_PTP_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
-
-#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
-
-#define ANA_PORT_SFID_CFG_GSZ 0x100
-#define ANA_PORT_SFID_CFG_RSZ 0x4
-
-#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
-#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
-
-#define ANA_PFC_PFC_CFG_GSZ 0x40
-
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
-
-#define ANA_PFC_PFC_TIMER_GSZ 0x40
-#define ANA_PFC_PFC_TIMER_RSZ 0x4
-
-#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
-
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
-
-#define ANA_IPT_IPT_GSZ 0x8
-
-#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
-#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
-#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
-#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
-#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
-#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
-
-#define ANA_PPT_PPT_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
-
-#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
-#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
-#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
-#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
-#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
-#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
-#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
-#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
-
-#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
-#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
-#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
-#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
-#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
-
-#define ANA_CPUQ_8021_CFG_RSZ 0x4
-
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
-
-#define ANA_DSCP_CFG_RSZ 0x4
-
-#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
-#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
-#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
-
-#define ANA_DSCP_REWR_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
-
-#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
-#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
-
-#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
-#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
-
-#define ANA_FID_CFG_VID_MC_ENA BIT(0)
-
-#define ANA_POL_PIR_CFG_GSZ 0x20
-
-#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_CIR_CFG_GSZ 0x20
-
-#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_MODE_CFG_GSZ 0x20
-
-#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
-#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
-#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
-#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
-#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
-#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
-#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
-
-#define ANA_POL_PIR_STATE_GSZ 0x20
-
-#define ANA_POL_CIR_STATE_GSZ 0x20
-
-#define ANA_POL_STATE_GSZ 0x20
-
-#define ANA_POL_FLOWC_RSZ 0x4
-
-#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
-
-#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
-#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
-#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
-#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
-#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
-
-#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
-#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
-
-#endif
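Aside: every multi-bit field in the register headers deleted in this patch (the ANA macros above, and the DEV and QSYS headers further down) follows the same three-macro idiom: FIELD(x) shifts a value into position and masks it, FIELD_M names the raw mask, and FIELD_X(x) extracts the field from a register image; the _GSZ/_RSZ constants look like per-group and per-replication address strides. A minimal sketch of the idiom, using an invented 3-bit field at bits 10:8 (the same shape as ANA_DSCP_CFG_QOS_DSCP_VAL above; all names here are hypothetical):

#include <linux/bits.h>
#include <linux/io.h>

/* Hypothetical 3-bit field occupying bits 10:8 of a register. */
#define FOO_QOS_VAL(x)		(((x) << 8) & GENMASK(10, 8))	/* encode */
#define FOO_QOS_VAL_M		GENMASK(10, 8)			/* mask */
#define FOO_QOS_VAL_X(x)	(((x) & GENMASK(10, 8)) >> 8)	/* extract */

static void foo_set_qos(void __iomem *reg, u32 qos)
{
	u32 v = readl(reg);

	v &= ~FOO_QOS_VAL_M;	/* clear the old field */
	v |= FOO_QOS_VAL(qos);	/* splice in the new value */
	writel(v, reg);
}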
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2da8eee27e98..b38820849faa 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -402,9 +402,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
of_get_phy_mode(portnp, &phy_mode);
- priv->phy_mode = phy_mode;
+ ocelot_port->phy_mode = phy_mode;
- switch (priv->phy_mode) {
+ switch (ocelot_port->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h
deleted file mode 100644
index 0a50d53bbd3f..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_dev.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_DEV_H_
-#define _MSCC_OCELOT_DEV_H_
-
-#define DEV_CLOCK_CFG 0x0
-
-#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
-#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
-#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
-#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
-#define DEV_CLOCK_CFG_PORT_RST BIT(3)
-#define DEV_CLOCK_CFG_PHY_RST BIT(2)
-#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-
-#define DEV_PORT_MISC 0x4
-
-#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
-#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
-#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
-#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
-#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
-
-#define DEV_EVENTS 0x8
-
-#define DEV_EEE_CFG 0xc
-
-#define DEV_EEE_CFG_EEE_ENA BIT(22)
-#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
-#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
-#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define DEV_EEE_CFG_PORT_LPI BIT(0)
-
-#define DEV_RX_PATH_DELAY 0x10
-
-#define DEV_TX_PATH_DELAY 0x14
-
-#define DEV_PTP_PREDICT_CFG 0x18
-
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
-
-#define DEV_MAC_ENA_CFG 0x1c
-
-#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
-#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
-
-#define DEV_MAC_MODE_CFG 0x20
-
-#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
-#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
-#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
-
-#define DEV_MAC_MAXLEN_CFG 0x24
-
-#define DEV_MAC_TAGS_CFG 0x28
-
-#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
-#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
-#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
-#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-
-#define DEV_MAC_ADV_CHK_CFG 0x2c
-
-#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-
-#define DEV_MAC_IFG_CFG 0x30
-
-#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
-#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
-#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
-#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
-#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
-#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
-#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-
-#define DEV_MAC_HDX_CFG 0x34
-
-#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
-#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
-#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
-#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
-#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
-#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
-#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
-#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
-#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-
-#define DEV_MAC_DBG_CFG 0x38
-
-#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
-#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-
-#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_MAC_STICKY 0x44
-
-#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
-#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
-#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
-#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
-#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
-#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
-#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
-#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-
-#define PCS1G_CFG 0x48
-
-#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
-#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
-#define PCS1G_CFG_PCS_ENA BIT(0)
-
-#define PCS1G_MODE_CFG 0x4c
-
-#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
-#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
-
-#define PCS1G_SD_CFG 0x50
-
-#define PCS1G_SD_CFG_SD_SEL BIT(8)
-#define PCS1G_SD_CFG_SD_POL BIT(4)
-#define PCS1G_SD_CFG_SD_ENA BIT(0)
-
-#define PCS1G_ANEG_CFG 0x54
-
-#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
-#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
-#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
-
-#define PCS1G_ANEG_NP_CFG 0x58
-
-#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
-#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
-
-#define PCS1G_LB_CFG 0x5c
-
-#define PCS1G_LB_CFG_RA_ENA BIT(4)
-#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
-#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
-
-#define PCS1G_DBG_CFG 0x60
-
-#define PCS1G_DBG_CFG_UDLT BIT(0)
-
-#define PCS1G_CDET_CFG 0x64
-
-#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
-
-#define PCS1G_ANEG_STATUS 0x68
-
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_STATUS_PR BIT(4)
-#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
-#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
-
-#define PCS1G_ANEG_NP_STATUS 0x6c
-
-#define PCS1G_LINK_STATUS 0x70
-
-#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
-#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
-#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
-#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
-#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
-
-#define PCS1G_LINK_DOWN_CNT 0x74
-
-#define PCS1G_STICKY 0x78
-
-#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
-#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
-
-#define PCS1G_DEBUG_STATUS 0x7c
-
-#define PCS1G_LPI_CFG 0x80
-
-#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
-#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
-#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
-
-#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
-
-#define PCS1G_LPI_STATUS 0x88
-
-#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
-#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
-#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
-#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
-#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
-#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
-#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
-
-#define PCS1G_TSTPAT_MODE_CFG 0x8c
-
-#define PCS1G_TSTPAT_STATUS 0x90
-
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
-#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
-
-#define DEV_PCS_FX100_CFG 0x94
-
-#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
-#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
-#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
-#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
-#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
-#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
-#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
-#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
-#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
-#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
-#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
-
-#define DEV_PCS_FX100_STATUS 0x98
-
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
-#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
-#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
-#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
-#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
-#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
-#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
deleted file mode 100644
index d8c63aa761be..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_qsys.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_QSYS_H_
-#define _MSCC_OCELOT_QSYS_H_
-
-#define QSYS_PORT_MODE_RSZ 0x4
-
-#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
-#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
-
-#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
-
-#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
-#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
-#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
-#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
-
-#define QSYS_EEE_CFG_RSZ 0x4
-
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
-
-#define QSYS_SW_STATUS_RSZ 0x4
-
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
-
-#define QSYS_QMAP_GSZ 0x4
-
-#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
-#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
-#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
-#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
-#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
-#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
-
-#define QSYS_ISDX_SGRP_GSZ 0x4
-
-#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
-
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
-#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
-
-#define QSYS_RED_PROFILE_RSZ 0x4
-
-#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
-#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
-#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
-
-#define QSYS_RES_CFG_GSZ 0x8
-
-#define QSYS_RES_STAT_GSZ 0x8
-
-#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
-#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
-#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
-#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
-
-#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
-#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
-#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
-
-#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
-#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_CIR_CFG_GSZ 0x80
-
-#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define QSYS_EIR_CFG_GSZ 0x80
-
-#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
-#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
-#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
-#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
-#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
-#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
-
-#define QSYS_SE_CFG_GSZ 0x80
-
-#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
-#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
-#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
-#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
-#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
-#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
-#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
-#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
-#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
-
-#define QSYS_SE_DWRR_CFG_GSZ 0x80
-#define QSYS_SE_DWRR_CFG_RSZ 0x4
-
-#define QSYS_SE_CONNECT_GSZ 0x80
-
-#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
-#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
-#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
-#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
-#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
-#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
-#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
-#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
-#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
-#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
-
-#define QSYS_SE_DLB_SENSE_GSZ 0x80
-
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
-
-#define QSYS_CIR_STATE_GSZ 0x80
-
-#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
-#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
-#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
-#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
-#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
-
-#define QSYS_EIR_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
-#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
-#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
-
-#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
-#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
-#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
-#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
-
-#define QSYS_TAG_CONFIG_RSZ 0x4
-
-#define QSYS_TAG_CONFIG_ENABLE BIT(0)
-#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
-#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
-#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
-
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
-#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
-#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
-
-#define QSYS_PORT_MAX_SDU_RSZ 0x4
-
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
-
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#endif
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c979f38a2e0c..2ee0d0be113a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2892,7 +2892,7 @@ drop:
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev)
{
- struct sk_buff *segs, *curr;
+ struct sk_buff *segs, *curr, *next;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
netdev_tx_t status;
@@ -2901,10 +2901,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
if (IS_ERR(segs))
goto drop;
- while (segs) {
- curr = segs;
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
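The conversion above is safe because skb_list_walk_safe() latches the next pointer before the loop body runs, so curr can be consumed, or freed by myri10ge_xmit() on error, without breaking the walk; skb_mark_not_on_list() then detaches the segment exactly as the old curr->next = NULL did. Roughly, the helpers have this shape (a sketch of linux/skbuff.h from memory, not the authoritative text):

#define skb_list_walk_safe(first, skb, next_skb)			\
	for ((skb) = (first), (next_skb) = (first) ? (first)->next : NULL; \
	     (skb);							\
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;	/* detach before handing the skb onward */
}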
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 1a2634cbbb69..d21d706b83a7 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -612,7 +612,7 @@ static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
-static void ns_tx_timeout(struct net_device *dev);
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
@@ -1881,7 +1881,7 @@ static void dump_ring(struct net_device *dev)
}
}
-static void ns_tx_timeout(struct net_device *dev)
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
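This prototype change, repeated below for ns83820, sonic, s2io and vxge, tracks the ndo_tx_timeout hook gaining a txqueue argument so that multi-queue drivers learn which TX queue actually stalled. Single-queue drivers such as natsemi can simply ignore it, and internal callers that cannot know the queue pass a sentinel, as the ns83820 watchdog below does with UINT_MAX. A hedged sketch of what a multi-queue driver might do with the argument (foo_reset_ring is an assumed helper, not from this patch):

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "TX queue %u timed out\n", txqueue);
	foo_reset_ring(dev, txqueue);	/* hypothetical: recover one ring */
	netif_tx_wake_queue(txq);
}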
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 6af9a7eee114..8e24c7acf79b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1549,7 +1549,7 @@ static int ns83820_stop(struct net_device *ndev)
return 0;
}
-static void ns83820_tx_timeout(struct net_device *ndev)
+static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ns83820 *dev = PRIV(ndev);
u32 tx_done_idx;
@@ -1603,7 +1603,7 @@ static void ns83820_tx_watch(struct timer_list *t)
ndev->name,
dev->tx_done_idx, dev->tx_free_idx,
atomic_read(&dev->nr_tx_skbs));
- ns83820_tx_timeout(ndev);
+ ns83820_tx_timeout(ndev, UINT_MAX);
}
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
@@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
- dev->base = ioremap_nocache(addr, PAGE_SIZE);
+ dev->base = ioremap(addr, PAGE_SIZE);
dev->tx_descs = pci_alloc_consistent(pci_dev,
4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
dev->rx_info.descs = pci_alloc_consistent(pci_dev,
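The ioremap_nocache() call above becomes plain ioremap(): the two have returned the same uncached mapping on every architecture for some time, and this series converts the remaining callers ahead of the alias being retired. The replacement pattern as a minimal probe-style sketch (names hypothetical):

static int foo_map_bar1(struct pci_dev *pdev, void __iomem **base)
{
	resource_size_t addr = pci_resource_start(pdev, 1);

	*base = ioremap(addr, PAGE_SIZE);	/* still uncached */
	if (!*base)
		return -ENOMEM;
	return 0;
}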
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index b339125b2f09..31be3ba66877 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -161,7 +184,7 @@ static int sonic_close(struct net_device *dev)
return 0;
}
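sonic_close() above, and sonic_tx_timeout() below, now disable the receiver and wait via sonic_quiesce(dev, SONIC_CR_ALL) before asserting reset, so buffers are never torn down while a command is still running. The same helper replaces the open-coded 100-iteration polls removed later in this patch; the command/wait idiom it enables looks like this (condensed from the CAM-load path in sonic_multicast_list() below):

	/* LCAM and TXP can't be used simultaneously: let any transmit
	 * finish, then load the CAM and wait for the bit to clear.
	 */
	sonic_quiesce(dev, SONIC_CR_TXP);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM);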
-static void sonic_tx_timeout(struct net_device *dev)
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
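With this hunk the transmit path claims lp->next_tx only inside the new lp->lock critical section, and the interrupt handler in the next hunk takes the same lock, so the TD ring indices and tx_skb[] slots are always observed consistently; spin_lock_irqsave() additionally masks local interrupts, which matters for macsonic's two SONIC IRQs at different priority levels. The overall pattern, with the descriptor bookkeeping elided:

static netdev_tx_t foo_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* ... claim lp->next_tx, fill the TD, kick SONIC_CR_TXP ... */
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}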
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
+ }
do {
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;
- if (td_status & 0x0001) {
+ if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
- lp->stats.tx_errors++;
- if (td_status & 0x0642)
+ if (td_status & (SONIC_TCR_EXD |
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
- if (td_status & 0x0180)
+ if (td_status &
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
- if (td_status & 0x0020)
+ if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
- if (td_status & 0x0004)
+ if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
/*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
- lp->stats.rx_fifo_errors++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
/* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE) {
+ if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
- }
- if (status & SONIC_INT_CRC) {
+ if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
- }
- if (status & SONIC_INT_MP) {
+ if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
- }
/* transmit error */
if (status & SONIC_INT_TXER) {
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ u16 tcr = SONIC_READ(SONIC_TCR);
+
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+ __func__, tcr);
+
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
+ /* Aborted transmission. Try again. */
+ netif_stop_queue(dev);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
}
/* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}
- /* load CAM done */
- if (status & SONIC_INT_LCD)
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (!*new_skb)
+ return false;
+
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(*new_skb, 2);
+
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!*new_addr) {
+ dev_kfree_skb(*new_skb);
+ *new_skb = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+ dma_addr_t old_addr, dma_addr_t new_addr)
+{
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+ u32 buf;
+
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
+ */
+ do {
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+ if (buf == old_addr)
+ break;
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+ } while (entry != end);
+
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
+
/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
- int status;
int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+ bool rbe = false;
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
- struct sk_buff *used_skb;
- struct sk_buff *new_skb;
- dma_addr_t new_laddr;
- u16 bufadr_l;
- u16 bufadr_h;
- int pkt_len;
-
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
- if (status & SONIC_RCR_PRX) {
- /* Malloc up new buffer. */
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
- if (new_skb == NULL) {
- lp->stats.rx_dropped++;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
break;
}
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
- skb_reserve(new_skb, 2);
-
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
- SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!new_laddr) {
- dev_kfree_skb(new_skb);
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+ struct sk_buff *used_skb = lp->rx_skb[i];
+ int pkt_len;
+
+ /* Pass the used buffer up the stack */
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+ DMA_FROM_DEVICE);
+
+ pkt_len = sonic_rda_get(dev, entry,
+ SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb,
+ dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ lp->rx_skb[i] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ } else {
+ /* Failed to obtain a new buffer so re-use it */
+ new_laddr = addr;
lp->stats.rx_dropped++;
- break;
}
-
- /* now we have a new skb to replace it, pass the used one up the stack */
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
- skb_trim(used_skb, pkt_len);
- used_skb->protocol = eth_type_trans(used_skb, dev);
- netif_rx(used_skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- /* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
-
- bufadr_l = (unsigned long)new_laddr & 0xffff;
- bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER)
- lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR)
- lp->stats.rx_crc_errors++;
- }
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receive buffer
- * give the buffer back to the SONIC
+ /* If RBE is already asserted when RWP advances then
+ * it's safe to clear RBE after processing this packet.
*/
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
- }
- } else
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
- dev->name);
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
/*
* give back the descriptor
*/
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
- lp->eol_rx = entry;
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+ prev_entry = entry;
+ entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+
+ lp->cur_rx = entry;
+
+ if (prev_entry != lp->eol_rx) {
+ /* Advance the EOL flag to put descriptors back into service */
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
}
+
+ if (rbe)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
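The ownership rule in sonic_update_rra() above is easier to see with a concrete picture: resources in [RRP, RWP) belong to the SONIC, the driver may only rewrite slots in the complementary range [RWP, RRP), and advancing RWP afterwards donates the refreshed slot back to the chip. A worked example, assuming four resources:

/*
 * RRP = 1, RWP = 3 (slot indices 0..3):
 *
 *   [0] driver   [1] SONIC   [2] SONIC   [3] driver
 *
 * sonic_update_rra() scans [RWP, RRP) = {3, 0} for the exhausted
 * buffer, rewrites that slot's address, then writes RWP = entry + 1
 * (mod ring size) so the chip can use the fresh buffer.
 */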
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
+ unsigned long flags;
+
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
- /* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+ /* LCAM and TXP commands can't be used simultaneously */
+ spin_lock_irqsave(&lp->lock, flags);
+ sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
}
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* While in reset mode, clear CAM Enable register */
+ SONIC_WRITE(SONIC_CE, 0);
+
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
}
/* initialize all RRA registers */
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_REA, lp->rra_end);
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), i);
+ sonic_quiesce(dev, SONIC_CR_RRRA);
/*
* Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 2b27f7049acb..e0e4cba6f6f6 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -110,6 +110,9 @@
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+ SONIC_CR_RXEN | SONIC_CR_TXP)
+
/*
* SONIC data configuration bits
*/
@@ -175,6 +178,7 @@
#define SONIC_TCR_NCRS 0x0100
#define SONIC_TCR_CRLS 0x0080
#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_OWC 0x0020
#define SONIC_TCR_PMB 0x0008
#define SONIC_TCR_FU 0x0004
#define SONIC_TCR_BCM 0x0002
@@ -274,8 +278,9 @@
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
@@ -312,8 +317,6 @@ struct sonic_local {
u32 rda_laddr; /* logical DMA address of RDA */
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
- unsigned int rra_end;
- unsigned int cur_rwp;
unsigned int cur_rx;
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
+ spinlock_t lock;
};
#define TX_TIMEOUT (3 * HZ)
@@ -336,7 +340,7 @@ static int sonic_close(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
-static void sonic_tx_timeout(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- ((__u16 *) base + (offset*2))[1] = val;
+ __raw_writew(val, base + (offset * 2) + 1);
#else
- ((__u16 *) base + (offset*2))[0] = val;
+ __raw_writew(val, base + (offset * 2) + 0);
#endif
else
- ((__u16 *) base)[offset] = val;
+ __raw_writew(val, base + (offset * 1) + 0);
}
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- return ((volatile __u16 *) base + (offset*2))[1];
+ return __raw_readw(base + (offset * 2) + 1);
#else
- return ((volatile __u16 *) base + (offset*2))[0];
+ return __raw_readw(base + (offset * 2) + 0);
#endif
else
- return ((volatile __u16 *) base)[offset];
+ return __raw_readw(base + (offset * 1) + 0);
}
/* Inlines that you should actually use for reading/writing DMA buffers */
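The raw accessors above also encode a layout rule worth spelling out: with bitmode set (32-bit DMA), each 16-bit descriptor field occupies two consecutive u16 slots and the live half is slot 1 on big-endian hosts but slot 0 on little-endian, while in 16-bit mode fields are packed one per slot. A worked example (offsets are field indices, not bytes):

	u16 buf[4];

	sonic_buf_put(buf, 0, 1, 0xabcd);	/* 16-bit mode: writes buf[1] */
	sonic_buf_put(buf, 1, 1, 0xabcd);	/* 32-bit mode: buf[3] on BE,
						 * buf[2] on LE */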
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
(entry * SIZEOF_SONIC_RR) + offset);
}
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return lp->rra_laddr +
+ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index e0b2bf327905..0ec6b8e8b549 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7238,7 +7238,7 @@ out_unlock:
* void
*/
-static void s2io_tx_watchdog(struct net_device *dev)
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct s2io_nic *sp = netdev_priv(dev);
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 0a921f30f98f..6fa3159a977f 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1065,7 +1065,7 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static void s2io_tx_watchdog(struct net_device *dev);
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1d334f2e0a56..9b63574b6202 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3273,7 +3273,7 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
*/
-static void vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct vxgedev *vdev;
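sonic_tx_timeout(), s2io_tx_watchdog() and vxge_tx_watchdog() all change for the same reason: the core now passes the index of the stalled queue to the .ndo_tx_timeout hook. A hedged sketch of a driver adopting the new prototype (the foo_* names are hypothetical):

#include <linux/netdevice.h>

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* txqueue identifies the stalled queue */
	netdev_warn(dev, "TX queue %u timed out, resetting\n", txqueue);
	/* schedule the usual reset work here */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_tx_timeout	= foo_tx_timeout,
};

Single-queue drivers such as the three above can simply ignore the argument, which is exactly what these conversions do.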
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index bac5be4d4f43..a3f68a718813 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -31,6 +31,7 @@ config NFP_APP_FLOWER
bool "NFP4000/NFP6000 TC Flower offload support"
depends on NFP
depends on NET_SWITCHDEV
+ depends on IPV6!=m || NFP=m
default y
---help---
Enable driver support for TC Flower offload on NFP4000 and NFP6000.
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9f8a1f69c0c4..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
u8 mask, val;
int err;
- if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
- err = -EOPNOTSUPP;
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
goto err_delete;
- }
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
@@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
- err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
- if (!match) {
- err = -ENOMEM;
- goto err_delete;
- }
-
+ if (!match)
+ return -ENOMEM;
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
@@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
- return err;
+ return -EOPNOTSUPP;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index a460c75522be..d81d450be50e 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -26,6 +26,7 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_CRYPTO_ADD = 10,
NFP_CCM_TYPE_CRYPTO_DEL = 11,
NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
+ NFP_CCM_TYPE_CRYPTO_RESYNC = 13,
__NFP_CCM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index 60372ddf69f0..bffe58bb2f27 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -4,6 +4,10 @@
#ifndef NFP_CRYPTO_H
#define NFP_CRYPTO_H 1
+struct net_device;
+struct nfp_net;
+struct nfp_net_tls_resync_req;
+
struct nfp_net_tls_offload_ctx {
__be32 fw_handle[2];
@@ -17,11 +21,22 @@ struct nfp_net_tls_offload_ctx {
#ifdef CONFIG_TLS_DEVICE
int nfp_net_tls_init(struct nfp_net *nn);
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len);
#else
static inline int nfp_net_tls_init(struct nfp_net *nn)
{
return 0;
}
+
+static inline int
+nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 67413d946c4a..8d1458896bcb 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -9,6 +9,14 @@
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+struct nfp_net_tls_resync_req {
+ __be32 fw_handle[2];
+ __be32 tcp_seq;
+ u8 l3_offset;
+ u8 l4_offset;
+ u8 resv[2];
+};
+
struct nfp_crypto_reply_simple {
struct nfp_ccm_hdr hdr;
__be32 error;
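struct nfp_net_tls_resync_req above is a fixed 16-byte record shared with the firmware. A userspace sketch pinning that layout down, with stdint stand-ins for the kernel's __be32/u8 types (an illustration, not driver code; build with -std=c11):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct tls_resync_req {
	uint32_t fw_handle[2];	/* __be32 in the driver */
	uint32_t tcp_seq;	/* __be32 */
	uint8_t  l3_offset;
	uint8_t  l4_offset;
	uint8_t  resv[2];
};

static_assert(sizeof(struct tls_resync_req) == 16,
	      "resync request must stay 16 bytes on the wire");

int main(void)
{
	printf("request size: %zu bytes\n", sizeof(struct tls_resync_req));
	return 0;
}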
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 96a96b35c0ca..7c50e3dfb9d5 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -5,6 +5,7 @@
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
+#include <net/inet6_hashtables.h>
#include <net/tls.h>
#include "../ccm.h"
@@ -391,8 +392,9 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return 0;
- tls_offload_rx_resync_set_type(sk,
- TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ if (!nn->tlv_caps.tls_resync_ss)
+ tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+
return 0;
err_fw_remove:
@@ -424,6 +426,7 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
struct nfp_crypto_req_update *req;
+ enum nfp_ccm_type type;
struct sk_buff *skb;
gfp_t flags;
int err;
@@ -442,15 +445,18 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
req->tcp_seq = cpu_to_be32(seq);
memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+ type = NFP_CCM_TYPE_CRYPTO_UPDATE;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- err = nfp_net_tls_communicate_simple(nn, skb, "sync",
- NFP_CCM_TYPE_CRYPTO_UPDATE);
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
if (err)
return err;
ntls->next_seq = seq;
} else {
- nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ if (nn->tlv_caps.tls_resync_ss)
+ type = NFP_CCM_TYPE_CRYPTO_RESYNC;
+ nfp_ccm_mbox_post(nn, skb, type,
sizeof(struct nfp_crypto_reply_simple));
+ atomic_inc(&nn->ktls_rx_resync_sent);
}
return 0;
@@ -462,6 +468,79 @@ static const struct tlsdev_ops nfp_net_tls_ops = {
.tls_dev_resync = nfp_net_tls_resync,
};
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be32 tcp_seq;
+ int err;
+
+ iph = pkt + req->l3_offset;
+ ipv6h = pkt + req->l3_offset;
+ th = pkt + req->l4_offset;
+
+ if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
+ req->l3_offset, req->l4_offset, pkt_len);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ switch (iph->version) {
+ case 4:
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+ break;
+#endif
+ default:
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
+ req->l3_offset, req->l4_offset, iph->version);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ err = 0;
+ if (!sk)
+ goto err_cnt_ign;
+ if (!tls_is_sk_rx_device_offloaded(sk) ||
+ sk->sk_shutdown & RCV_SHUTDOWN)
+ goto err_put_sock;
+
+ ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+ /* some FW versions can't report the handle and report 0s */
+ if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
+ memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
+ goto err_put_sock;
+
+ /* copy to ensure alignment */
+ memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
+ tls_offload_rx_resync_request(sk, tcp_seq);
+ atomic_inc(&nn->ktls_rx_resync_req);
+
+ sock_gen_put(sk);
+ return 0;
+
+err_put_sock:
+ sock_gen_put(sk);
+err_cnt_ign:
+ atomic_inc(&nn->ktls_rx_resync_ign);
+ return err;
+}
+
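In nfp_net_tls_rx_resync_req() above, req points into a packet buffer at an arbitrary offset, which is why tcp_seq is memcpy'd into a local instead of being dereferenced directly. A self-contained sketch of that alignment-safe read:

#include <stdint.h>
#include <string.h>

/* Safe on any alignment; the result stays big-endian, exactly as the
 * driver hands it on to tls_offload_rx_resync_request(). */
static uint32_t read_be32_any_alignment(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char pkt[] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

	/* read from odd offset 1 without tripping alignment rules */
	return read_be32_any_alignment(pkt + 1) != 0;
}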
static int nfp_net_tls_reset(struct nfp_net *nn)
{
struct nfp_crypto_req_reset *req;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1b019fdfcd97..c06600fb47ff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -22,8 +22,9 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS (IP_TUNNEL_INFO_TX | \
+ IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@@ -394,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev, struct netlink_ext_ack *extack)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
+ bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+ size_t act_size = sizeof(struct nfp_fl_set_tun);
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+ return -EOPNOTSUPP;
+
+ if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+ return -EOPNOTSUPP;
+
BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
@@ -417,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
return -EOPNOTSUPP;
}
- set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
tmp_set_ip_tun_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
- FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+ FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+ FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+ } else if (ipv6) {
+ struct net *net = dev_net(netdev);
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+
+ flow.daddr = ip_tun->key.u.ipv6.dst;
+ flow.flowi6_proto = IPPROTO_UDP;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+ if (!IS_ERR(dst)) {
+ set_tun->ttl = ip6_dst_hoplimit(dst);
+ dst_release(dst);
+ } else {
+ set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ }
+#endif
} else {
struct net *net = dev_net(netdev);
struct flowi4 flow = {};
@@ -455,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
}
@@ -467,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
}
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ if (ipv6) {
+ pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+ pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+ } else {
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ }
return 0;
}
@@ -956,8 +985,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
@@ -1032,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
}
@@ -1046,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev, extack);
+ err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+ netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
+ *a_len += sizeof(struct nfp_fl_set_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
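nfp_fl_set_tun() packs the tunnel type and pre-tunnel index into one word with FIELD_PREP against the renamed NFP_FL_TUNNEL_TYPE/NFP_FL_PRE_TUN_INDEX masks. A userspace sketch of that packing, assuming the GENMASK(7, 4)/GENMASK(2, 0) positions from cmsg.h and a tunnel-type value of 2 chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define TUN_TYPE_SHIFT	 4	/* NFP_FL_TUNNEL_TYPE = GENMASK(7, 4) */
#define PRE_TUN_IDX_MASK 0x7u	/* NFP_FL_PRE_TUN_INDEX = GENMASK(2, 0) */

int main(void)
{
	uint32_t tun_type = 2;		/* hypothetical type value */
	uint32_t pretun_idx = 0;	/* only one pre-tunnel today */
	uint32_t word = (tun_type << TUN_TYPE_SHIFT) |
			(pretun_idx & PRE_TUN_IDX_MASK);

	printf("tun_type_index word: 0x%08x\n", word);	/* 0x00000020 */
	return 0;
}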
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 05981b54eaab..a595ddb92bff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -270,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
- nfp_tunnel_request_route(app, skb);
+ nfp_tunnel_request_route_v4(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
+ nfp_tunnel_request_route_v6(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
+ nfp_tunnel_keep_alive_v6(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
nfp_flower_stats_rlim_reply(app, skb);
break;
@@ -361,7 +367,8 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
- } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 7eb2ec8969c3..9b50d76bbc09 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,6 +26,7 @@
#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
@@ -63,6 +64,7 @@
#define NFP_FL_MAX_GENEVE_OPT_ACT 32
#define NFP_FL_MAX_GENEVE_OPT_CNT 64
#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+#define NFP_FL_MAX_GENEVE_OPT_KEY_V6 8
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
@@ -70,7 +72,7 @@
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
-#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
@@ -99,8 +101,8 @@
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
-#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
-#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FL_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_PRE_TUN_INDEX GENMASK(2, 0)
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
@@ -206,13 +208,16 @@ struct nfp_fl_pre_lag {
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
- __be16 reserved;
- __be32 ipv4_dst;
- /* reserved for use with IPv6 addresses */
- __be32 extra[3];
+ __be16 flags;
+ union {
+ __be32 ipv4_dst;
+ struct in6_addr ipv6_dst;
+ };
};
-struct nfp_fl_set_ipv4_tun {
+#define NFP_FL_PRE_TUN_IPV6 BIT(0)
+
+struct nfp_fl_set_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -387,6 +392,11 @@ struct nfp_flower_tun_ipv4 {
__be32 dst;
};
+struct nfp_flower_tun_ipv6 {
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
struct nfp_flower_tun_ip_ext {
u8 tos;
u8 ttl;
@@ -416,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun {
__be32 tun_id;
};
+/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 reserved1;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be32 reserved2;
+ __be32 tun_id;
+};
+
/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
* -----------------------------------------------------------------
* 3 2 1
@@ -445,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun {
__be32 reserved2;
};
+/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_gre_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
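The two IPv6 tunnel match layouts above are fully specified by their diagrams: 11 words/44 bytes for the UDP case and 12 words/48 bytes for GRE. A quick userspace sketch that pins those sizes down with stdint stand-ins for the kernel types (an illustration only; build with -std=c11):

#include <assert.h>
#include <stdint.h>

struct in6 { uint8_t b[16]; };
struct tun_ipv6 { struct in6 src, dst; };
struct tun_ip_ext { uint8_t tos, ttl; };

struct ipv6_udp_tun {		/* mirrors nfp_flower_ipv6_udp_tun */
	struct tun_ipv6 ipv6;
	uint16_t reserved1;
	struct tun_ip_ext ip_ext;
	uint32_t reserved2;
	uint32_t tun_id;
};

struct ipv6_gre_tun {		/* mirrors nfp_flower_ipv6_gre_tun */
	struct tun_ipv6 ipv6;
	uint16_t tun_flags;
	struct tun_ip_ext ip_ext;
	uint16_t reserved1;
	uint16_t ethertype;
	uint32_t tun_key;
	uint32_t reserved2;
};

static_assert(sizeof(struct ipv6_udp_tun) == 44, "11 words / 44 bytes");
static_assert(sizeof(struct ipv6_gre_tun) == 48, "12 words / 48 bytes");

int main(void) { return 0; }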
@@ -485,6 +571,10 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e0c985fcaec1..d55d0d33bc45 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -43,6 +43,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
+#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -62,18 +63,26 @@ struct nfp_fl_stats_id {
* struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
* @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload
- * @neigh_off_list: List of neighbour offloads
+ * @ipv6_off_list: List of IPv6 addresses to offload
+ * @neigh_off_list_v4: List of IPv4 neighbour offloads
+ * @neigh_off_list_v6: List of IPv6 neighbour offloads
* @ipv4_off_lock: Lock for the IPv4 address list
- * @neigh_off_lock: Lock for the neighbour address list
+ * @ipv6_off_lock: Lock for the IPv6 address list
+ * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
+ * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_fl_tunnel_offloads {
struct rhashtable offloaded_macs;
struct list_head ipv4_off_list;
- struct list_head neigh_off_list;
+ struct list_head ipv6_off_list;
+ struct list_head neigh_off_list_v4;
+ struct list_head neigh_off_list_v6;
struct mutex ipv4_off_lock;
- spinlock_t neigh_off_lock;
+ struct mutex ipv6_off_lock;
+ spinlock_t neigh_off_lock_v4;
+ spinlock_t neigh_off_lock_v6;
struct ida mac_off_ids;
struct notifier_block neigh_nb;
};
@@ -273,12 +282,25 @@ struct nfp_fl_stats {
u64 used;
};
+/**
+ * struct nfp_ipv6_addr_entry - cached IPv6 addresses
+ * @ipv6_addr: the cached IPv6 address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv6_addr_entry {
+ struct in6_addr ipv6_addr;
+ int ref_count;
+ struct list_head list;
+};
+
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct rhash_head fl_node;
struct rcu_head rcu;
__be32 nfp_tun_ipv4_addr;
+ struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
@@ -396,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
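Unlike the IPv4 case, where the tunnel destination is a plain __be32 stored in the flow, each offloaded IPv6 address is a shared, ref-counted nfp_ipv6_addr_entry: nfp_tunnel_add_ipv6_off() hands out (or creates) an entry and nfp_tunnel_put_ipv6_off() drops the reference, with the firmware list re-synced on transitions. A minimal userspace sketch of that pattern, under those assumed semantics:

#include <stdlib.h>
#include <string.h>

struct in6_key { unsigned char b[16]; };

struct addr_entry {
	struct in6_key addr;
	int ref_count;
	struct addr_entry *next;
};

/* add: reuse an existing entry (ref_count++) or allocate a new one;
 * the driver would push the updated address list to firmware here */
static struct addr_entry *addr_add(struct addr_entry **head,
				   const struct in6_key *a)
{
	struct addr_entry *e;

	for (e = *head; e; e = e->next)
		if (!memcmp(&e->addr, a, sizeof(*a))) {
			e->ref_count++;
			return e;
		}

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->addr = *a;
	e->ref_count = 1;
	e->next = *head;
	*head = e;
	return e;
}

/* put: drop a reference; unlink and free on the last one */
static void addr_put(struct addr_entry **head, struct addr_entry *e)
{
	struct addr_entry **pp;

	if (--e->ref_count)
		return;
	for (pp = head; *pp; pp = &(*pp)->next)
		if (*pp == e) {
			*pp = e->next;
			free(e);
			return;
		}
}

int main(void)
{
	struct addr_entry *head = NULL;
	struct in6_key k = { { 0x20, 0x01 } };
	struct addr_entry *e1 = addr_add(&head, &k);
	struct addr_entry *e2 = addr_add(&head, &k);	/* same entry, ref 2 */

	addr_put(&head, e2);
	addr_put(&head, e1);	/* list empty again */
	return head != NULL;
}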
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9cc3ba17ff69..546bc01d507d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,8 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_cls_offload *flow, u8 key_type)
+ struct flow_rule *rule, u8 key_type)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
- struct nfp_flower_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
- struct nfp_flower_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
- struct nfp_flower_ipv6 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
- flow_rule_match_enc_opts(flow->rule, &match);
+ flow_rule_match_enc_opts(rule, &match);
memcpy(ext, match.key->data, match.key->len);
memcpy(msk, match.mask->data, match.mask->len);
@@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct nfp_flower_tun_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
@@ -299,12 +282,26 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
}
static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+ struct nfp_flower_tun_ipv6 *msk,
+ struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct nfp_flower_tun_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match;
@@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
}
static void
-nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
- struct nfp_flower_ipv4_gre_tun *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ u32 vni;
- /* NVGRE is the only supported GRE tunnel type */
- ext->ethertype = cpu_to_be16(ETH_P_TEB);
- msk->ethertype = cpu_to_be16(~0);
+ flow_rule_match_enc_keyid(rule, &match);
+ vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key = cpu_to_be32(vni);
+ vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key_msk = cpu_to_be32(vni);
+ }
+}
+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+ __be16 *flags_msk, struct flow_rule *rule)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- ext->tun_key = match.key->keyid;
- msk->tun_key = match.mask->keyid;
+ *key = match.key->keyid;
+ *key_msk = match.mask->keyid;
- ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
- msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid match;
- u32 temp_vni;
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
- flow_rule_match_enc_keyid(rule, &match);
- temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- ext->tun_id = cpu_to_be32(temp_vni);
- temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- msk->tun_id = cpu_to_be32(temp_vni);
- }
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
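nfp_flower_compile_tun_udp_key() above shifts the 24-bit VNI into the upper bytes of the tun_id word. A runnable sketch of that packing, assuming NFP_FL_TUN_VNI_OFFSET is 8 as defined in cmsg.h:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define NFP_FL_TUN_VNI_OFFSET 8	/* assumed value from cmsg.h */

int main(void)
{
	uint32_t keyid = htonl(42);	/* match.key->keyid, big-endian */
	uint32_t vni = ntohl(keyid) << NFP_FL_TUN_VNI_OFFSET;
	uint32_t tun_id = htonl(vni);	/* value stored in ext->tun_id */

	printf("tun_id: 0x%08x\n", ntohl(tun_id));	/* 0x00002a00 */
	return 0;
}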
@@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u32 port_id;
int err;
u8 *ext;
@@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer);
+ rule, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
@@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
(struct nfp_flower_tp_ports *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
@@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
(struct nfp_flower_ipv4 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
@@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
(struct nfp_flower_ipv6 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_gre_tun);
- msk += sizeof(struct nfp_flower_ipv4_gre_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_gre_tun((void *)ext,
+ (void *)msk, rule);
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+ dst = &gre_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv6_gre_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_udp_tun);
- msk += sizeof(struct nfp_flower_ipv4_udp_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_udp_tun((void *)ext,
+ (void *)msk, rule);
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+ dst = &udp_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv6_udp_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_udp_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, flow);
+ err = nfp_flower_compile_geneve_opt(ext, msk, rule);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 987ae221f6be..7ca5c1becfcf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -54,6 +54,10 @@
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
@@ -64,7 +68,8 @@
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
- NFP_FLOWER_LAYER_IPV4)
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
struct nfp_flower_merge_check {
union {
@@ -146,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size,
+ u32 *key_layer_two, int *key_size, bool ipv6,
struct netlink_ext_ack *extack)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+ (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
}
@@ -167,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
struct flow_dissector_key_enc_opts *enc_op,
u32 *key_layer_two, u8 *key_layer, int *key_size,
struct nfp_flower_priv *priv,
- enum nfp_flower_tun_type *tun_type,
+ enum nfp_flower_tun_type *tun_type, bool ipv6,
struct netlink_ext_ack *extack)
{
int err;
@@ -176,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
*key_layer |= NFP_FLOWER_LAYER_VXLAN;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (enc_op) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
@@ -192,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
*key_layer |= NFP_FLOWER_LAYER_EXT_META;
*key_size += sizeof(struct nfp_flower_ext_meta);
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (!enc_op)
break;
@@ -200,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
return -EOPNOTSUPP;
}
- err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
- key_size, extack);
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+ ipv6, extack);
if (err)
return err;
break;
@@ -237,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
@@ -268,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_match_enc_opts enc_op = { NULL, NULL };
struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
struct flow_match_control enc_ctl;
struct flow_match_ports enc_ports;
+ bool ipv6_tun = false;
flow_rule_match_enc_control(rule, &enc_ctl);
@@ -277,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
}
- if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+
+ ipv6_tun = enc_ctl.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (ipv6_tun &&
+ !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
return -EOPNOTSUPP;
}
- /* These fields are already verified as used. */
- flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ if (!ipv6_tun &&
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
return -EOPNOTSUPP;
}
+ if (ipv6_tun) {
+ flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+ if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+ sizeof(ipv6_addrs.mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (netif_is_gretap(netdev)) {
- *tun_type = NFP_FL_TUNNEL_GRE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GRE;
- key_size +=
- sizeof(struct nfp_flower_ipv4_gre_tun);
+ if (!netif_is_gretap(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ return -EOPNOTSUPP;
+ }
- if (enc_op.key) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
- return -EOPNOTSUPP;
- }
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+ if (ipv6_tun) {
+ key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ key_size +=
+ sizeof(struct nfp_flower_ipv6_gre_tun);
} else {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_gre_tun);
+ }
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
return -EOPNOTSUPP;
}
} else {
@@ -323,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
&key_layer_two,
&key_layer,
&key_size, priv,
- tun_type, extack);
+ tun_type, ipv6_tun,
+ extack);
if (err)
return err;
@@ -491,6 +540,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
goto err_free_mask;
flow_pay->nfp_tun_ipv4_addr = 0;
+ flow_pay->nfp_tun_ipv6 = NULL;
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
@@ -517,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
struct nfp_fl_set_ip4_addrs *ipv4_add;
struct nfp_fl_set_ipv6_addr *ipv6_add;
struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tport *tport;
struct nfp_fl_set_eth *eth;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
+ bool ipv6_tun = false;
u8 act_id = 0;
u8 *ports;
int i;
@@ -542,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
case NFP_FL_ACTION_OPCODE_POP_VLAN:
merge->tci = cpu_to_be16(0);
break;
- case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
/* New tunnel header means l2 to l4 can be matched. */
eth_broadcast_addr(&merge->l2.mac_dst[0]);
eth_broadcast_addr(&merge->l2.mac_src[0]);
memset(&merge->l4, 0xff,
sizeof(struct nfp_flower_tp_ports));
- memset(&merge->ipv4, 0xff,
- sizeof(struct nfp_flower_ipv4));
+ if (ipv6_tun)
+ memset(&merge->ipv6, 0xff,
+ sizeof(struct nfp_flower_ipv6));
+ else
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
break;
case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
eth = (struct nfp_fl_set_eth *)a;
@@ -597,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
ports[i] |= tport->tp_port_mask[i];
break;
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ pre_tun = (struct nfp_fl_pre_tunnel *)a;
+ ipv6_tun = be16_to_cpu(pre_tun->flags) &
+ NFP_FL_PRE_TUN_IPV6;
+ break;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
break;
@@ -765,15 +825,15 @@ nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
- struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_set_tun *tun;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
- tun = (struct nfp_fl_set_ipv4_tun *)a;
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+ tun = (struct nfp_fl_set_tun *)a;
tun->outer_vlan_tpid = vlan->vlan_tpid;
tun->outer_vlan_tci = vlan->vlan_tci;
@@ -1058,15 +1118,22 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+ key_layer & NFP_FLOWER_LAYER_IPV6) {
+ /* Flags and proto fields have same offset in IPv4 and IPv6. */
int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int size;
int i;
+ size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
+ sizeof(struct nfp_flower_ipv4) :
+ sizeof(struct nfp_flower_ipv6);
+
mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
- for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ for (i = 0; i < size; i++)
if (mask[i] && i != ip_flags && i != ip_proto) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
@@ -1195,6 +1262,8 @@ err_remove_rhash:
err_release_metadata:
nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
kfree(flow_pay->action_data);
kfree(flow_pay->mask_data);
kfree(flow_pay->unmasked_data);
@@ -1311,6 +1380,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (nfp_flow->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
+
if (!nfp_flow->in_hw) {
err = 0;
goto err_free_merge_flow;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2600ce476d6b..2df3deedf9fd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -55,6 +55,25 @@ struct nfp_tun_active_tuns {
};
/**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels report in message
+ * @flags: options part of the request
+ * @tun_info.ipv6: dest IPv6 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns_v6 {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info_v6 {
+ struct in6_addr ipv6;
+ __be32 egress_port;
+ } tun_info[];
+};
+
+/**
* struct nfp_tun_neigh - neighbour/route entry on the NFP
* @dst_ipv4: destination IPv4 address
* @src_ipv4: source IPv4 address
@@ -71,6 +90,22 @@ struct nfp_tun_neigh {
};
/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6: destination IPv6 address
+ * @src_ipv6: source IPv6 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+ struct in6_addr dst_ipv6;
+ struct in6_addr src_ipv6;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
* struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
* @ingress_port: ingress port of packet that signalled request
* @ipv4_addr: destination ipv4 address for route
@@ -83,13 +118,23 @@ struct nfp_tun_req_route_ipv4 {
};
/**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr: destination of route
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv6_addr: destination ipv6 address for route
+ */
+struct nfp_tun_req_route_ipv6 {
+ __be32 ingress_port;
+ struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
* @list: list pointer
+ * @ip_add: destination of route - can be IPv4 or IPv6
*/
-struct nfp_ipv4_route_entry {
- __be32 ipv4_addr;
+struct nfp_offloaded_route {
struct list_head list;
+ u8 ip_add[];
};
#define NFP_FL_IPV4_ADDRS_MAX 32
@@ -116,6 +161,18 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
+#define NFP_FL_IPV6_ADDRS_MAX 4
+
+/**
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv6_addr: array of NFP_FL_IPV6_ADDRS_MAX 128-bit IPv6 addresses
+ */
+struct nfp_tun_ipv6_addr {
+ __be32 count;
+ struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
@@ -206,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
rcu_read_unlock();
}
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct nfp_tun_active_tuns_v6 *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ void *ipv6_add;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != struct_size(payload, tun_info, count)) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ ipv6_add = &payload->tun_info[i].ipv6;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_dev_get(app, port, NULL);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+ rcu_read_unlock();
+#endif
+}
+
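The struct_size() comparison in nfp_tunnel_keep_alive_v6() above rejects any message whose length disagrees with its count field before the flexible tun_info[] array is walked. A simplified userspace sketch of the same sizing (without the overflow saturation the kernel helper adds):

#include <stdint.h>
#include <stdio.h>

struct tuns_v6 {		/* mirrors nfp_tun_active_tuns_v6 */
	uint32_t seq, count, flags;
	struct {
		uint8_t ipv6[16];
		uint32_t egress_port;
	} tun_info[];
};

int main(void)
{
	size_t count = 3;
	size_t expect = sizeof(struct tuns_v6) +
			count * sizeof(((struct tuns_v6 *)0)->tun_info[0]);

	/* a well-formed message for 3 tunnels is 12 + 3 * 20 = 72 bytes */
	printf("expected payload length: %zu bytes\n", expect);
	return 0;
}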
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -224,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
return 0;
}
-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+ void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
return true;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
return false;
}
-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- return;
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
+ return 0;
}
- }
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+ entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
- return;
+ spin_unlock_bh(list_lock);
+ return -ENOMEM;
}
- entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->tun.neigh_off_list);
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ memcpy(entry->ip_add, add, add_len);
+ list_add_tail(&entry->list, route_list);
+ spin_unlock_bh(list_lock);
+
+ return 0;
}
-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
list_del(&entry->list);
kfree(entry);
break;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
+}
+
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
}
static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
u32 port_id;
@@ -302,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
/* If entry has expired send dst IP with all other fields 0. */
if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
goto send_msg;
@@ -314,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+ nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
send_msg:
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
@@ -322,16 +477,54 @@ send_msg:
(unsigned char *)&payload, flag);
}
+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+ struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh_v6 payload;
+ u32 port_id;
+
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+ payload.dst_ipv6 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+ nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+ /* Trigger probe to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv6 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(port_id);
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&payload, flag);
+}
+
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nfp_flower_priv *app_priv;
struct netevent_redirect *redir;
- struct flowi4 flow = {};
+ struct flowi4 flow4 = {};
+ struct flowi6 flow6 = {};
struct neighbour *n;
struct nfp_app *app;
struct rtable *rt;
+ bool ipv6 = false;
int err;
switch (event) {
@@ -346,7 +539,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
- flow.daddr = *(__be32 *)n->primary_key;
+ if (n->tbl->family == AF_INET6)
+ ipv6 = true;
+
+ if (ipv6)
+ flow6.daddr = *(struct in6_addr *)n->primary_key;
+ else
+ flow4.daddr = *(__be32 *)n->primary_key;
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
@@ -356,28 +555,46 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
/* Only concerned with changes to routes already added to NFP. */
- if (!nfp_tun_has_route(app, flow.daddr))
+ if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+ (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
- /* Do a route lookup to populate flow data. */
- rt = ip_route_output_key(dev_net(n->dev), &flow);
- err = PTR_ERR_OR_ZERO(rt);
- if (err)
+ if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *dst;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+ return NOTIFY_DONE;
+
+ dst_release(dst);
+ flow6.flowi6_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+ } else {
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
- ip_rt_put(rt);
+ ip_rt_put(rt);
+
+ flow4.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+ }
#else
return NOTIFY_DONE;
-#endif
-
- flow.flowi4_proto = IPPROTO_UDP;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+#endif /* CONFIG_INET */
return NOTIFY_OK;
}
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_tun_req_route_ipv4 *payload;
struct net_device *netdev;
@@ -411,7 +628,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
neigh_release(n);
rcu_read_unlock();
return;
@@ -421,6 +638,48 @@ fail_rcu_unlock:
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv6 *payload;
+ struct net_device *netdev;
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
+
+ flow.daddr = payload->ipv6_addr;
+ flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+ NULL);
+ if (IS_ERR(dst))
+ goto fail_rcu_unlock;
+#else
+ goto fail_rcu_unlock;
+#endif
+
+ n = dst_neigh_lookup(dst, &flow.daddr);
+ dst_release(dst);
+ if (!n)
+ goto fail_rcu_unlock;
+
+ nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+ neigh_release(n);
+ rcu_read_unlock();
+ return;
+
+fail_rcu_unlock:
+ rcu_read_unlock();
+ nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+}
+
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -502,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
nfp_tun_write_ipv4_list(app);
}
+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+ struct nfp_tun_ipv6_addr payload;
+ int count = 0;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+ if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
+ break;
+ }
+ payload.ipv6_addr[count++] = entry->ipv6_addr;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ payload.count = cpu_to_be32(count);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+ sizeof(struct nfp_tun_ipv6_addr),
+ &payload, GFP_KERNEL);
+}
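Note that the control message is always the full sizeof(struct nfp_tun_ipv6_addr): the address array has a fixed NFP_FL_IPV6_ADDRS_MAX slots and count tells the firmware how many leading entries are valid. Assuming no implicit structure padding, the wire layout for count = 2 would look like this (offsets illustrative):

/* offset  0: __be32 count           = 2
 * offset  4: struct in6_addr [0]    = first offloaded endpoint
 * offset 20: struct in6_addr [1]    = second offloaded endpoint
 * offset 36: struct in6_addr [2..3] = zeroed, ignored by the FW
 *
 * total: 4 + 4 * 16 = 68 bytes, resent on every list change
 */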
+
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+ if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+ entry->ref_count++;
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ return entry;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return NULL;
+ }
+ entry->ipv6_addr = *ipv6;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ nfp_tun_write_ipv6_list(app);
+
+ return entry;
+}
+
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ bool freed = false;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ if (!--entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ freed = true;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ if (freed)
+ nfp_tun_write_ipv6_list(app);
+}
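The add/put pair reference-counts offloaded endpoint addresses, so the list is rewritten to the NFP only on the 0-to-1 and 1-to-0 transitions. A hedged usage sketch; the caller names are hypothetical (the real callers sit in the flower tunnel match/action offload paths):

/* Pin the tunnel source address while a flow using it is offloaded ... */
static int example_flow_offload(struct nfp_app *app, struct in6_addr *src,
				struct nfp_ipv6_addr_entry **entry)
{
	*entry = nfp_tunnel_add_ipv6_off(app, src);	/* takes a reference */
	if (!*entry)
		return -ENOMEM;
	return 0;
}

/* ... and drop the reference when the flow is deleted. */
static void example_flow_remove(struct nfp_app *app,
				struct nfp_ipv6_addr_entry *entry)
{
	nfp_tunnel_put_ipv6_off(app, entry);
}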
+
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
@@ -1013,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app)
ida_init(&priv->tun.mac_off_ids);
- /* Initialise priv data for IPv4 offloading. */
+ /* Initialise priv data for IPv4/v6 offloading. */
mutex_init(&priv->tun.ipv4_off_lock);
INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
+ mutex_init(&priv->tun.ipv6_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->tun.neigh_off_lock);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ spin_lock_init(&priv->tun.neigh_off_lock_v4);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
+ spin_lock_init(&priv->tun.neigh_off_lock_v6);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1034,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app)
void nfp_tunnel_config_stop(struct nfp_app *app)
{
+ struct nfp_offloaded_route *route_entry, *temp;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *route_entry;
struct nfp_ipv4_addr_entry *ip_entry;
+ struct nfp_tun_neigh_v6 ipv6_route;
+ struct nfp_tun_neigh ipv4_route;
struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1050,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
kfree(ip_entry);
}
- /* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
- list);
+ mutex_destroy(&priv->tun.ipv6_off_lock);
+
+ /* Free memory in the route list and remove entries from fw cache. */
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v4, list) {
+ memset(&ipv4_route, 0, sizeof(ipv4_route));
+ memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
+ sizeof(ipv4_route.dst_ipv4));
list_del(&route_entry->list);
kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&ipv4_route,
+ GFP_KERNEL);
+ }
+
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v6, list) {
+ memset(&ipv6_route, 0, sizeof(ipv6_route));
+ memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
+ sizeof(ipv6_route.dst_ipv6));
+ list_del(&route_entry->list);
+ kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&ipv6_route,
+ GFP_KERNEL);
}
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 250f510b1d21..ff4438478ea9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -586,6 +586,9 @@ struct nfp_net_dp {
* @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
* lack of space
+ * @ktls_rx_resync_req: Counter of TLS RX resync requests received
+ * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored
+ * @ktls_rx_resync_sent: Counter of TLS RX resyncs completed
* @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
@@ -674,6 +677,9 @@ struct nfp_net {
atomic64_t ktls_conn_id_gen;
atomic_t ktls_no_space;
+ atomic_t ktls_rx_resync_req;
+ atomic_t ktls_rx_resync_ign;
+ atomic_t ktls_rx_resync_sent;
struct {
struct sk_buff_head queue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index bcdcd6de7dea..9bfb3b077bc1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -47,6 +47,7 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
+#include "crypto/fw.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -1321,17 +1322,11 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
netdev_tx_reset_queue(nd_q);
}
-static void nfp_net_tx_timeout(struct net_device *netdev)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
- int i;
- for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
- continue;
- nn_warn(nn, "TX timeout on ring: %d\n", i);
- }
- nn_warn(nn, "TX watchdog timeout\n");
+ nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
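This hunk is one instance of a tree-wide API change in this series: .ndo_tx_timeout now receives the index of the queue that stalled, so drivers no longer have to scan every ring looking for the stopped one. A minimal sketch of a converted handler (driver struct and work item are hypothetical):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv { struct work_struct reset_work; };	/* hypothetical */

static void my_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct my_priv *priv = netdev_priv(netdev);

	/* txqueue is the ring the stack observed as stalled */
	netdev_warn(netdev, "TX timeout on queue %u\n", txqueue);
	schedule_work(&priv->reset_work);
}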
/* Receive processing
@@ -1667,9 +1662,9 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
&rx_hash->hash);
}
-static void *
+static bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, int meta_len)
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
u32 meta_info;
@@ -1699,14 +1694,20 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
(__force __wsum)__get_unaligned_cpu32(data);
data += 4;
break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
default:
- return NULL;
+ return true;
}
meta_info >>= NFP_NET_META_FIELD_SIZE;
}
- return data;
+ return data != pkt;
}
static void
@@ -1891,12 +1892,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash_desc(dp->netdev, &meta,
rxbuf->frag + meta_off, rxd);
} else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- meta_len);
- if (unlikely(end != rxbuf->frag + pkt_off)) {
+ if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index d835c14b7257..c3a763134e79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -17,6 +17,30 @@ static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
}
+static bool
+nfp_net_tls_parse_crypto_ops(struct device *dev, struct nfp_net_tlv_caps *caps,
+ u8 __iomem *ctrl_mem, u8 __iomem *data,
+ unsigned int length, unsigned int offset,
+ bool rx_stream_scan)
+{
+ /* Ignore the legacy TLV if the new one was already parsed */
+ if (caps->tls_resync_ss && !rx_stream_scan)
+ return true;
+
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return false;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->tls_resync_ss = rx_stream_scan;
+
+ return true;
+}
+
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps)
{
@@ -104,15 +128,25 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->mbox_cmsg_types = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
- if (length < 32) {
- dev_err(dev,
- "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
- length, offset);
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ false))
return -EINVAL;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ if ((data - ctrl_mem) % 8) {
+ dev_warn(dev, "VNIC STATS TLV misaligned, ignoring offset:%u len:%u\n",
+ offset, length);
+ break;
}
-
- caps->crypto_ops = readl(data);
- caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->vnic_stats_off = data - ctrl_mem;
+ caps->vnic_stats_cnt = length / 10;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ true))
+ return -EINVAL;
break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ee6b24e4eacd..3d61a8cb60b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -45,6 +45,7 @@
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
+#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -479,6 +480,22 @@
* 8 words, bitmaps of supported and enabled crypto operations.
* First 16B (4 words) contains a bitmap of supported crypto operations,
* and next 16B contain the enabled operations.
+ * This capability is made obsolete by %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN,
+ * which carries a better resync method.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ * Variable, per-vNIC statistics; the data should be 8B aligned (FW should
+ * insert a zero-length RESERVED TLV to pad).
+ * TLV data has two sections. The first is an array of statistics' IDs
+ * (2B each); the second holds the 8B statistics themselves. Statistics
+ * are 8B aligned, so there may be padding between the two sections.
+ * The number of statistics is floor(tlv.length / (2 + 8)).
+ * This TLV supersedes the %NFP_NET_CFG_STATS_* values (statistics in this
+ * TLV duplicate the old ones, so the driver should be careful not to
+ * render both unnecessarily).
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but TLS RX resync is performed
+ * by firmware stream scanning rather than being kernel-assisted.
*/
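A worked example of the layout described above, with illustrative numbers: a TLV of data length 40 carries floor(40 / 10) = 4 statistics; the ID array spans bytes 0-7 (four 2B IDs), which is already 8B aligned, so the 8B values occupy bytes 8-39. The offset math the driver applies:

#include <stdio.h>

int main(void)
{
	unsigned int length = 40;		/* TLV data length, example */
	unsigned int cnt = length / (2 + 8);	/* = 4 statistics */
	/* round the ID array up to 8B so the values stay aligned,
	 * as nfp_vnic_get_tlv_stats() does with roundup()
	 */
	unsigned int vals_off = (2 * cnt + 7) & ~7u;

	printf("count=%u, ids at 0..%u, values at %u..%u\n",
	       cnt, 2 * cnt - 1, vals_off, vals_off + 8 * cnt - 1);
	return 0;
}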
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -490,6 +507,8 @@
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
+#define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13
struct device;
@@ -502,6 +521,9 @@ struct device;
* @mbox_cmsg_types: cmsgs which can be passed through the mailbox
* @crypto_ops: supported crypto operations
* @crypto_enable_off: offset of crypto ops enable region
+ * @vnic_stats_off: offset of vNIC stats area
+ * @vnic_stats_cnt: number of vNIC stats
+ * @tls_resync_ss: TLS resync will be performed via stream scan
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
@@ -511,6 +533,9 @@ struct nfp_net_tlv_caps {
u32 mbox_cmsg_types;
u32 crypto_ops;
unsigned int crypto_enable_off;
+ unsigned int vnic_stats_off;
+ unsigned int vnic_stats_cnt;
+ unsigned int tls_resync_ss:1;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b840ee47339..d648e32c0520 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -148,11 +148,33 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
+static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
+ [1] = "dev_rx_discards",
+ [2] = "dev_rx_errors",
+ [3] = "dev_rx_bytes",
+ [4] = "dev_rx_uc_bytes",
+ [5] = "dev_rx_mc_bytes",
+ [6] = "dev_rx_bc_bytes",
+ [7] = "dev_rx_pkts",
+ [8] = "dev_rx_mc_pkts",
+ [9] = "dev_rx_bc_pkts",
+
+ [10] = "dev_tx_discards",
+ [11] = "dev_tx_errors",
+ [12] = "dev_tx_bytes",
+ [13] = "dev_tx_uc_bytes",
+ [14] = "dev_tx_mc_bytes",
+ [15] = "dev_tx_bc_bytes",
+ [16] = "dev_tx_pkts",
+ [17] = "dev_tx_mc_pkts",
+ [18] = "dev_tx_bc_pkts",
+};
+
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
-#define NN_CTRL_PATH_STATS 1
+#define NN_CTRL_PATH_STATS 4
#define SFP_SFF_REV_COMPLIANCE 1
@@ -454,6 +476,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
data = nfp_pr_et(data, "hw_tls_no_space");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ok");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ign");
+ data = nfp_pr_et(data, "rx_tls_resync_sent");
return data;
}
@@ -502,6 +527,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
*data++ = gathered_stats[j];
*data++ = atomic_read(&nn->ktls_no_space);
+ *data++ = atomic_read(&nn->ktls_rx_resync_req);
+ *data++ = atomic_read(&nn->ktls_rx_resync_ign);
+ *data++ = atomic_read(&nn->ktls_rx_resync_sent);
return data;
}
@@ -560,6 +588,65 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
return data;
}
+static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
+{
+ return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
+}
+
+static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
+{
+ unsigned int i, id;
+ u8 __iomem *mem;
+ u64 id_word = 0;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
+ if (!(i % 4))
+ id_word = readq(mem + i * 2);
+
+ id = (u16)id_word;
+ id_word >>= 16;
+
+ if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
+ nfp_tlv_stat_names[id][0]) {
+ memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ } else {
+ data = nfp_pr_et(data, "dev_unknown_stat%u", id);
+ }
+ }
+
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ data = nfp_pr_et(data, "rxq_%u_pkts", i);
+ data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
+ }
+
+ return data;
+}
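The string loop above amortises BAR reads: every fourth iteration issues one readq(), which fetches four 2B statistic IDs at once, and successive iterations peel them off the low 16 bits. A stand-alone rendering of the unpacking with an assumed word value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* one 64-bit word as read from the BAR; holds IDs 3, 7, 12, 16 */
	uint64_t id_word = 0x0010000C00070003ULL;	/* assumed value */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		uint16_t id = (uint16_t)id_word;	/* low 16 bits */

		id_word >>= 16;
		printf("stat %u has id %u\n", i, id);
	}
	return 0;
}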
+
+static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
+{
+ u8 __iomem *mem;
+ unsigned int i;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
+ *data++ = readq(mem + i * 8);
+
+ mem = nn->dp.ctrl_bar;
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
+ }
+
+ return data;
+}
+
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
struct nfp_port *port;
@@ -609,8 +696,12 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
- false);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats_strings(data,
+ nn->max_r_vecs,
+ false);
+ else
+ data = nfp_vnic_get_tlv_stats_strings(nn, data);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
@@ -624,7 +715,11 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
+ nn->max_r_vecs);
+ else
+ data = nfp_vnic_get_tlv_stats(nn, data);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -632,13 +727,18 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int cnt;
switch (sset) {
case ETH_SS_STATS:
- return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
- nfp_mac_get_stats_count(netdev) +
- nfp_app_port_get_stats_count(nn->port);
+ cnt = nfp_vnic_get_sw_stats_count(netdev);
+ if (!nn->tlv_caps.vnic_stats_off)
+ cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
+ else
+ cnt += nfp_vnic_get_tlv_stats_count(nn);
+ cnt += nfp_mac_get_stats_count(netdev);
+ cnt += nfp_app_port_get_stats_count(nn->port);
+ return cnt;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e4977cdf7678..c0e2f4394aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
+ ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
@@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
- vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+ vf->q_bar = ioremap(map_addr, bar_sz);
if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* TX queues */
map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
- nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
+ nn->tx_bar = ioremap(map_addr, tx_bar_sz);
if (!nn->tx_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* RX queues */
map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
- nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
+ nn->rx_bar = ioremap(map_addr, rx_bar_sz);
if (!nn->rx_bar) {
nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
err = -EIO;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 85d46f206b3c..b454db283aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
int pf;
@@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
}
bar = &nfp->bar[4 + i];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
msg += snprintf(msg, end - msg,
@@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area)
priv->iomem = priv->bar->iomem + priv->bar_offset;
else
/* Must have been too big. Sub-allocate. */
- priv->iomem = ioremap_nocache(priv->phys, priv->size);
+ priv->iomem = ioremap(priv->phys, priv->size);
if (IS_ERR_OR_NULL(priv->iomem)) {
dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 6b54cb3b681d..2fc10a36afa4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2739,7 +2739,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
* nv_tx_timeout: dev->tx_timeout function
* Called with netif_tx_lock held.
*/
-static void nv_tx_timeout(struct net_device *dev)
+static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 656169214cdb..d20cf03a3ea0 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1149,19 +1149,6 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
spin_unlock_irqrestore(&pldat->lock, flags);
}
-static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, req, cmd);
-}
-
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
@@ -1229,7 +1216,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_stop = lpc_eth_close,
.ndo_start_xmit = lpc_eth_hard_start_xmit,
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
- .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 18e6d87c607b..73ec195fbc30 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2271,7 +2271,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* pch_gbe_tx_timeout - Respond to a Tx Hang
* @netdev: Network interface device structure
*/
-static void pch_gbe_tx_timeout(struct net_device *netdev)
+static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index eee883a2aa8d..70816d2e2990 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -548,7 +548,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void hamachi_timer(struct timer_list *t);
-static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -1042,7 +1042,7 @@ static void hamachi_timer(struct timer_list *t)
add_timer(&hmp->timer);
}
-static void hamachi_tx_timeout(struct net_device *dev)
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct hamachi_private *hmp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 5113ee647090..520779f05e1a 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -344,7 +344,7 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
-static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -677,7 +677,7 @@ static void yellowfin_timer(struct timer_list *t)
add_timer(&yp->timer);
}
-static void yellowfin_tx_timeout(struct net_device *dev)
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 98e102af7756..bb106a32f416 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,19 +12,27 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.18.0-k"
+#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_SUBDEV_ID_NAPLES_25 0x4000
-#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001
-#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002
-
#define DEVCMD_TIMEOUT 10
+struct ionic_vf {
+ u16 index;
+ u8 macaddr[6];
+ __le32 maxrate;
+ __le16 vlanid;
+ u8 spoofchk;
+ u8 trusted;
+ u8 linkstate;
+ dma_addr_t stats_pa;
+ struct ionic_lif_stats stats;
+};
+
struct ionic {
struct pci_dev *pdev;
struct device *dev;
@@ -46,6 +54,9 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct rw_semaphore vf_op_lock; /* lock for VF operations */
+ struct ionic_vf *vfs;
+ int num_vfs;
struct timer_list watchdog_timer;
int watchdog_period;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 9a9ab8cb2cb3..448d7b23b2f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -104,10 +104,112 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
iounmap(page);
}
+static void ionic_vf_dealloc_locked(struct ionic *ionic)
+{
+ struct ionic_vf *v;
+ dma_addr_t dma = 0;
+ int i;
+
+ if (!ionic->vfs)
+ return;
+
+ for (i = ionic->num_vfs - 1; i >= 0; i--) {
+ v = &ionic->vfs[i];
+
+ if (v->stats_pa) {
+ (void)ionic_set_vf_config(ionic, i,
+ IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&dma);
+ dma_unmap_single(ionic->dev, v->stats_pa,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ v->stats_pa = 0;
+ }
+ }
+
+ kfree(ionic->vfs);
+ ionic->vfs = NULL;
+ ionic->num_vfs = 0;
+}
+
+static void ionic_vf_dealloc(struct ionic *ionic)
+{
+ down_write(&ionic->vf_op_lock);
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+}
+
+static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
+{
+ struct ionic_vf *v;
+ int err = 0;
+ int i;
+
+ down_write(&ionic->vf_op_lock);
+
+ ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL);
+ if (!ionic->vfs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ v = &ionic->vfs[i];
+ v->stats_pa = dma_map_single(ionic->dev, &v->stats,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ if (dma_mapping_error(ionic->dev, v->stats_pa)) {
+ v->stats_pa = 0;
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* ignore failures from older FW; we just won't get stats */
+ (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&v->stats_pa);
+ ionic->num_vfs++;
+ }
+
+out:
+ if (err)
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+ return err;
+}
+
+static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct device *dev = ionic->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %d\n", ret);
+ goto out;
+ }
+
+ ret = ionic_vf_alloc(ionic, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot alloc VFs: %d\n", ret);
+ pci_disable_sriov(pdev);
+ goto out;
+ }
+
+ ret = num_vfs;
+ } else {
+ pci_disable_sriov(pdev);
+ ionic_vf_dealloc(ionic);
+ }
+
+out:
+ return ret;
+}
+
static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ionic *ionic;
+ int num_vfs;
int err;
ionic = ionic_devlink_alloc(dev);
@@ -206,6 +308,15 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_lifs;
}
+ init_rwsem(&ionic->vf_op_lock);
+ num_vfs = pci_num_vf(pdev);
+ if (num_vfs) {
+ dev_info(dev, "%d VFs found already enabled\n", num_vfs);
+ err = ionic_vf_alloc(ionic, num_vfs);
+ if (err)
+ dev_err(dev, "Cannot enable existing VFs: %d\n", err);
+ }
+
err = ionic_lifs_register(ionic);
if (err) {
dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
@@ -223,6 +334,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_deregister_lifs:
ionic_lifs_unregister(ionic);
err_out_deinit_lifs:
+ ionic_vf_dealloc(ionic);
ionic_lifs_deinit(ionic);
err_out_free_lifs:
ionic_lifs_free(ionic);
@@ -279,6 +391,7 @@ static struct pci_driver ionic_driver = {
.id_table = ionic_id_table,
.probe = ionic_probe,
.remove = ionic_remove,
+ .sriov_configure = ionic_sriov_configure,
};
int ionic_bus_register_driver(void)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 5f9d2ec70446..87f82f36812f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -286,6 +286,64 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
ionic_dev_cmd_go(idev, &cmd);
}
+/* VF commands */
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
+ .vf_setattr.attr = attr,
+ .vf_setattr.vf_index = vf,
+ };
+ int err;
+
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ cmd.vf_setattr.spoofchk = *data;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_TRUST:
+ cmd.vf_setattr.trust = *data;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_LINKSTATE:
+ cmd.vf_setattr.linkstate = *data;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_MAC:
+ ether_addr_copy(cmd.vf_setattr.macaddr, data);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, data);
+ break;
+ case IONIC_VF_ATTR_VLAN:
+ cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, *(u16 *)data);
+ break;
+ case IONIC_VF_ATTR_RATE:
+ cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, *(u32 *)data);
+ break;
+ case IONIC_VF_ATTR_STATSADDR:
+ cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
+ __func__, vf, *(u64 *)data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
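Every attribute travels in the same 64B vf_setattr device command; only the union member selected by attr is meaningful, the rest is padding. A hedged example caller, mirroring what ndo_set_vf_vlan does later in this diff (the wrapper name is hypothetical):

/* Sketch: push VLAN 100 to VF 0; locking/range checks as in the real
 * ndo callbacks further down.
 */
static int example_set_vf_vlan(struct ionic *ionic)
{
	u16 vlan = 100;

	return ionic_set_vf_config(ionic, 0, IONIC_VF_ATTR_VLAN,
				   (u8 *)&vlan);
}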
+
/* LIF commands */
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 4665c5dc5324..7838e342c4fd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -113,6 +113,12 @@ static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+/* SR/IOV */
+static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
+static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+
struct ionic_devinfo {
u8 asic_type;
u8 asic_rev;
@@ -275,6 +281,7 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 39317cdfa6cf..f131adad96e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -51,6 +51,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_RDMA_CREATE_CQ = 52,
IONIC_CMD_RDMA_CREATE_ADMINQ = 53,
+ /* SR/IOV commands */
+ IONIC_CMD_VF_GETATTR = 60,
+ IONIC_CMD_VF_SETATTR = 61,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -1639,6 +1643,93 @@ enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
};
+enum ionic_vf_attr {
+ IONIC_VF_ATTR_SPOOFCHK = 1,
+ IONIC_VF_ATTR_TRUST = 2,
+ IONIC_VF_ATTR_MAC = 3,
+ IONIC_VF_ATTR_LINKSTATE = 4,
+ IONIC_VF_ATTR_VLAN = 5,
+ IONIC_VF_ATTR_RATE = 6,
+ IONIC_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum ionic_vf_link_status - VF link status
+ */
+enum ionic_vf_link_status {
+ IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
+ IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
+ IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+};
+
+/**
+ * struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode: Opcode
+ * @vf_index: VF index
+ * @attr: Attribute type (enum ionic_vf_attr)
+ * @macaddr: MAC address
+ * @vlanid: VLAN ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: DMA address for VF stats
+ */
+struct ionic_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[60];
+ };
+};
+
+struct ionic_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @vf_index: VF index
+ * @attr: Attribute type (enum ionic_vf_attr)
+ */
+struct ionic_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ u8 rsvd[60];
+};
+
+struct ionic_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ };
+ u8 color;
+};
+
/**
* union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
@@ -2289,6 +2380,9 @@ union ionic_dev_cmd {
struct ionic_port_getattr_cmd port_getattr;
struct ionic_port_setattr_cmd port_setattr;
+ struct ionic_vf_setattr_cmd vf_setattr;
+ struct ionic_vf_getattr_cmd vf_getattr;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -2318,6 +2412,9 @@ union ionic_dev_cmd_comp {
struct ionic_port_getattr_comp port_getattr;
struct ionic_port_setattr_comp port_setattr;
+ struct ionic_vf_setattr_comp vf_setattr;
+ struct ionic_vf_getattr_comp vf_getattr;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
ionic_lif_reset_comp lif_reset;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index ef8258713369..191271f6260d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1285,7 +1285,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
rtnl_unlock();
}
-static void ionic_tx_timeout(struct net_device *netdev)
+static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1619,6 +1619,227 @@ int ionic_stop(struct net_device *netdev)
return err;
}
+static int ionic_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ivf->vf = vf;
+ ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->qos = 0;
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ struct ionic_lif_stats *vs;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ memset(vf_stats, 0, sizeof(*vf_stats));
+ vs = &ionic->vfs[vf].stats;
+
+ vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
+ vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
+ vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
+ vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
+ vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
+ vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
+ vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
+ le64_to_cpu(vs->rx_mcast_drop_packets) +
+ le64_to_cpu(vs->rx_bcast_drop_packets);
+ vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
+ le64_to_cpu(vs->tx_mcast_drop_packets) +
+ le64_to_cpu(vs->tx_bcast_drop_packets);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
+ return -EINVAL;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ if (!ret)
+ ether_addr_copy(ionic->vfs[vf].macaddr, mac);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* until someday when we support qos */
+ if (qos)
+ return -EINVAL;
+
+ if (vlan > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ if (!ret)
+ ionic->vfs[vf].vlanid = vlan;
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_rate(struct net_device *netdev, int vf,
+ int tx_min, int tx_max)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* setting the min just seems silly */
+ if (tx_min)
+ return -EINVAL;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ if (!ret)
+ lif->ionic->vfs[vf].maxrate = tx_max;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_SPOOFCHK, &data);
+ if (!ret)
+ ionic->vfs[vf].spoofchk = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_TRUST, &data);
+ if (!ret)
+ ionic->vfs[vf].trusted = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data;
+ int ret;
+
+ switch (set) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ data = IONIC_VF_LINK_STATUS_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ data = IONIC_VF_LINK_STATUS_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ data = IONIC_VF_LINK_STATUS_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_LINKSTATE, &data);
+ if (!ret)
+ ionic->vfs[vf].linkstate = set;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
@@ -1632,6 +1853,14 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_change_mtu = ionic_change_mtu,
.ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
+ .ndo_set_vf_vlan = ionic_set_vf_vlan,
+ .ndo_set_vf_trust = ionic_set_vf_trust,
+ .ndo_set_vf_mac = ionic_set_vf_mac,
+ .ndo_set_vf_rate = ionic_set_vf_rate,
+ .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
+ .ndo_get_vf_config = ionic_get_vf_config,
+ .ndo_set_vf_link_state = ionic_set_vf_link_state,
+ .ndo_get_vf_stats = ionic_get_vf_stats,
};
int ionic_reset_queues(struct ionic_lif *lif)
@@ -1965,18 +2194,22 @@ static int ionic_station_set(struct ionic_lif *lif)
if (err)
return err;
+ if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
+ return 0;
+
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
- if (err)
- return err;
-
- if (!is_zero_ether_addr(netdev->dev_addr)) {
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
+ if (err) {
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
+ addr.sa_data);
+ return 0;
}
+ netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, false);
+
eth_commit_mac_addr_change(netdev, &addr);
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a55fd1f8c31b..9c5a7dd45f9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_rx_stats {
u64 csum_complete;
u64 csum_error;
u64 buffers_posted;
+ u64 dropped;
};
#define IONIC_QCQ_F_INITED BIT(0)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3590ea7fd88a..a8e3fb73b465 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -165,6 +165,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_VF_GETATTR:
+ return "IONIC_CMD_VF_GETATTR";
+ case IONIC_CMD_VF_SETATTR:
+ return "IONIC_CMD_VF_SETATTR";
default:
return "DEVCMD_UNKNOWN";
}
@@ -326,9 +330,9 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
unsigned long max_wait;
unsigned long duration;
int opcode;
+ int hb = 0;
int done;
int err;
- int hb;
WARN_ON(in_interrupt());
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 03916b6d47f2..a1e9796a660a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -39,6 +39,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_none),
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
+ IONIC_RX_STAT_DESC(dropped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 97e79949b359..e452f4242ba0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -152,12 +152,16 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status)
+ if (comp->status) {
+ stats->dropped++;
return;
+ }
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ stats->dropped++;
return;
+ }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
@@ -167,8 +171,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
else
skb = ionic_rx_frags(q, desc_info, cq_info);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ stats->dropped++;
return;
+ }
skb_record_rx_queue(skb, q->index);
@@ -337,6 +343,8 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
+ unsigned int remain_len;
+ unsigned int seg_len;
unsigned int nfrags;
bool ring_doorbell;
unsigned int i, j;
@@ -346,6 +354,7 @@ void ionic_rx_fill(struct ionic_queue *q)
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
+ remain_len = len;
desc_info = q->head;
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
@@ -369,7 +378,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
desc->addr = cpu_to_le64(page_info->dma_addr);
- desc->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, len);
+ desc->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
/* fill sg descriptors - pages[1..n] */
@@ -385,7 +396,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
sg_elem->addr = cpu_to_le64(page_info->dma_addr);
- sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
+ sg_elem->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
}
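
The fill path above now threads remain_len through the descriptor and SG-element loops so the final page of a buffer advertises only the bytes actually left, rather than a full PAGE_SIZE. The same arithmetic in standalone C; the 4096-byte PAGE_SIZE and 9000-byte length are illustrative values:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int remain_len = 9000;	/* e.g. a jumbo-frame sized buffer */

	while (remain_len) {
		/* mirrors min_t(unsigned int, PAGE_SIZE, remain_len) */
		unsigned int seg_len =
			remain_len < PAGE_SIZE ? remain_len : PAGE_SIZE;

		printf("descriptor segment: %u bytes\n", seg_len);
		remain_len -= seg_len;
	}
	return 0;	/* prints 4096, 4096, 808 */
}
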
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c692a41e4548..8067ea04d455 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -49,7 +49,7 @@ static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
@@ -2222,7 +2222,7 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netxen_advert_link_change(adapter, linkup);
}
-static void netxen_tx_timeout(struct net_device *netdev)
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 89fe091c958d..fa41bf08a589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -253,7 +253,8 @@ enum qed_resources {
QED_VLAN,
QED_RDMA_CNQ_RAM,
QED_ILT,
- QED_LL2_QUEUE,
+ QED_LL2_RAM_QUEUE,
+ QED_LL2_CTX_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
@@ -461,6 +462,8 @@ struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u32 init_ops_size;
};
@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
bool valid;
};
+enum qed_hsi_def_type {
+ QED_HSI_DEF_MAX_NUM_VFS,
+ QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+ QED_HSI_DEF_MAX_NUM_PORTS,
+ QED_HSI_DEF_MAX_SB_PER_PATH,
+ QED_HSI_DEF_MAX_NUM_PFS,
+ QED_HSI_DEF_MAX_NUM_VPORTS,
+ QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ QED_HSI_DEF_MAX_QM_TX_QUEUES,
+ QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ QED_HSI_DEF_MAX_PBF_CMD_LINES,
+ QED_HSI_DEF_MAX_BTB_BLOCKS,
+ QED_NUM_HSI_DEFS
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -646,6 +666,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
void *dbg_user_info;
+ struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
/* PWM region specific data */
u16 wid_count;
@@ -668,6 +689,7 @@ struct qed_hwfn {
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
+ struct phys_mem_desc *fw_overlay_mem;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
@@ -796,8 +818,8 @@ struct qed_dev {
u8 cache_shift;
/* Init */
- const struct iro *iro_arr;
-#define IRO (p_hwfn->cdev->iro_arr)
+ const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
@@ -856,6 +878,8 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
+ struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
@@ -868,16 +892,35 @@ struct qed_dev {
bool iwarp_cmt;
};
-#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
- : MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
- : MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
- : MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
- : MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
- : MAX_NUM_PFS_K2)
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
+
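
Where the old macros hard-coded a BB-versus-K2 ternary, every chip limit now funnels through one qed_get_hsi_def_val() lookup. An illustrative user-space analog of that table-driven scheme; the enum names and values below are placeholders, not the real qed definitions or chip limits:

#include <stdio.h>

enum hsi_def { DEF_MAX_NUM_VFS, DEF_MAX_NUM_PORTS, NUM_DEFS };
enum chip { CHIP_BB, CHIP_K2, NUM_CHIPS };

/* One row per attribute, one column per chip generation. */
static const unsigned int hsi_def_val[NUM_DEFS][NUM_CHIPS] = {
	[DEF_MAX_NUM_VFS]   = { 120, 192 },
	[DEF_MAX_NUM_PORTS] = {   2,   4 },
};

static unsigned int get_hsi_def_val(enum chip c, enum hsi_def d)
{
	return hsi_def_val[d][c];
}

int main(void)
{
	printf("max VFs on a K2-style chip: %u\n",
	       get_hsi_def_val(CHIP_K2, DEF_MAX_NUM_VFS));
	return 0;
}
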
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 8e1bdf58b9e7..fbfff2b1dc93 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -50,12 +50,6 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES PROTOCOLID_COMMON
-#define NUM_TASK_TYPES 2
-#define NUM_TASK_PF_SEGMENTS 4
-#define NUM_TASK_VF_SEGMENTS 1
-
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -123,126 +117,6 @@ struct src_ent {
/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct qed_tid_seg {
- u32 count;
- u8 type;
- bool has_fl_mem;
-};
-
-struct qed_conn_type_cfg {
- u32 cid_count;
- u32 cids_per_vf;
- struct qed_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration, Per connection type (protocol) resources. */
-#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK (0)
-#define SRQ_BLK (0)
-#define CDUT_SEG_BLK(n) (1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
-
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
- u32 reg;
- u32 val;
-};
-
-struct qed_ilt_cli_blk {
- u32 total_size; /* 0 means not active */
- u32 real_size_in_page;
- u32 start_line;
- u32 dynamic_line_cnt;
-};
-
-struct qed_ilt_client_cfg {
- bool active;
-
- /* ILT boundaries */
- struct ilt_cfg_pair first;
- struct ilt_cfg_pair last;
- struct ilt_cfg_pair p_size;
-
- /* ILT client blocks for PF */
- struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
- u32 pf_total_lines;
-
- /* ILT client blocks for VFs */
- struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
- u32 vf_total_lines;
-};
-
-/* Per Path -
- * ILT shadow table
- * Protocol acquired CID lists
- * PF start line in ILT
- */
-struct qed_dma_mem {
- dma_addr_t p_phys;
- void *p_virt;
- size_t size;
-};
-
-struct qed_cid_acquired_map {
- u32 start_cid;
- u32 max_count;
- unsigned long *cid_map;
-};
-
-struct qed_cxt_mngr {
- /* Per protocl configuration */
- struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
-
- /* computed ILT structure */
- struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
-
- /* Task type sizes */
- u32 task_type_size[NUM_TASK_TYPES];
-
- /* total number of VFs for this hwfn -
- * ALL VFs are symmetric in terms of HW resources
- */
- u32 vf_count;
-
- /* Acquired CIDs */
- struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
-
- struct qed_cid_acquired_map
- acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
-
- /* ILT shadow table */
- struct qed_dma_mem *ilt_shadow;
- u32 pf_start_line;
-
- /* Mutex for a dynamic ILT allocation */
- struct mutex mutex;
-
- /* SRC T2 */
- struct qed_dma_mem *t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
-
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
- /* Maximal number of L2 steering filters */
- u32 arfs_count;
-};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
- struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
- if (!p_mngr->t2)
+ if (!p_t2 || !p_t2->dma_mem)
return;
- for (i = 0; i < p_mngr->t2_num_pages; i++)
- if (p_mngr->t2[i].p_virt)
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_mngr->t2[i].size,
- p_mngr->t2[i].p_virt,
- p_mngr->t2[i].p_phys);
+ p_t2->dma_mem[i].size,
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr);
+
+ kfree(p_t2->dma_mem);
+ p_t2->dma_mem = NULL;
+}
+
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+ struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return -EINVAL;
- kfree(p_mngr->t2);
- p_mngr->t2 = NULL;
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = min_t(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_t2->dma_mem[i].phys_addr,
+ GFP_KERNEL);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return -ENOMEM;
+
+ memset(*p_virt, 0, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return 0;
}
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
struct qed_ilt_client_cfg *p_src;
struct qed_src_iids src_iids;
- struct qed_dma_mem *p_t2;
+ struct qed_src_t2 *p_t2;
int rc;
memset(&src_iids, 0, sizeof(src_iids));
@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
- p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
- GFP_KERNEL);
- if (!p_mngr->t2) {
+ p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!p_t2->dma_mem) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
- /* allocate t2 pages */
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
- u32 size = min_t(u32, total_size, psz);
- void **p_virt = &p_mngr->t2[i].p_virt;
-
- *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
- if (!p_mngr->t2[i].p_virt) {
- rc = -ENOMEM;
- goto t2_fail;
- }
- p_mngr->t2[i].size = size;
- total_size -= size;
- }
+ rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
- p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
- p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
- p_mngr->last_free = (u64) p_t2->p_phys +
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = min_t(u32,
ent_per_page,
conn_num);
- struct src_ent *entries = p_mngr->t2[i].p_virt;
- u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
entries[j].next = cpu_to_be64(val);
}
- if (i < p_mngr->t2_num_pages - 1)
- val = (u64) p_mngr->t2[i + 1].p_phys;
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = cpu_to_be64(val);
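
The loop above strings the T2 pages into one chain: each src_ent's next field holds the bus address of the entry that follows it, the last entry of a page points at the first entry of the next page, and the final entry gets 0. A compact sketch of the same chaining with a fake DMA address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct ent { uint64_t next; };

int main(void)
{
	struct ent chain[4];
	uint64_t base = 0x1000;	/* pretend bus address of the page */
	unsigned int i;

	for (i = 0; i < 3; i++)
		chain[i].next = base + (i + 1) * sizeof(struct ent);
	chain[3].next = 0;	/* end of the free-entry chain */

	for (i = 0; i < 4; i++)
		printf("ent[%u].next = 0x%" PRIx64 "\n", i, chain[i].next);
	return 0;
}
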
@@ -988,7 +882,7 @@ t2_fail:
}
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
if (!clients[pos].active) { \
continue; \
} else \
@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
ilt_size = qed_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
- struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
- if (p_dma->p_virt)
+ if (p_dma->virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_dma->size, p_dma->p_virt,
- p_dma->p_phys);
- p_dma->p_virt = NULL;
+ p_dma->size, p_dma->virt_addr,
+ p_dma->phys_addr);
+ p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
}
@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client,
u32 start_line_offset)
{
- struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
if (!p_virt)
return -ENOMEM;
- ilt_shadow[line].p_phys = p_phys;
- ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
int rc;
size = qed_cxt_ilt_shadow_size(clients);
- p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
rc = -ENOMEM;
@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
- (u32)(size * sizeof(struct qed_dma_mem)));
+ (u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 64K */
- for (i = 0; i < ILT_CLI_MAX; i++)
+ for (i = 0; i < MAX_ILT_CLIENTS; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
/* Initialize task sizes */
p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
- if (p_hwfn->cdev->p_iov_info)
+ if (p_hwfn->cdev->p_iov_info) {
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ p_mngr->first_vf_in_pf =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ }
/* Initialize the dynamic ILT allocation mutex */
mutex_init(&p_mngr->mutex);
@@ -1522,7 +1421,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
- params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1674,7 +1572,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
- struct qed_dma_mem *p_shdw;
+ struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
@@ -1699,15 +1597,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
/* p_virt could be NULL in case of dynamic allocation */
- if (p_shdw[line].p_virt) {
+ if (p_shdw[line].virt_addr) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
- (p_shdw[line].p_phys >> 12));
+ (p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
rt_offst, line, i,
- (u64)(p_shdw[line].p_phys >> 12));
+ (u64)(p_shdw[line].phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -2050,10 +1948,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
- if (!p_mngr->ilt_shadow[line].p_virt)
+ if (!p_mngr->ilt_shadow[line].virt_addr)
return -EINVAL;
- p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
@@ -2234,7 +2132,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < total_lines; i++) {
shadow_line = i + p_fl_seg->start_line -
p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
}
p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
p_fl_seg->real_size_in_page;
@@ -2296,7 +2194,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
- if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -2334,8 +2232,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
}
}
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
@@ -2345,9 +2243,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
ilt_hw_entry = 0;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
- SET_FIELD(ilt_hw_entry,
- ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+ >> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
@@ -2434,16 +2332,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
@@ -2547,8 +2445,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
ilt_idx = tid / num_tids_per_block + p_seg->start_line -
p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
(tid % num_tids_per_block) * tid_size;
return 0;
}
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+ if (p_blk->real_size_in_page == 0)
+ return 0;
+
+ return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 pages = 0, i;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
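
All four page-counting helpers above reduce to the same two steps: guard against an inactive block (real_size_in_page == 0) and round the block's total size up to whole pages. The core arithmetic, runnable on its own with illustrative sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int blk_pages(unsigned int total_size,
			      unsigned int real_size_in_page)
{
	if (real_size_in_page == 0)	/* inactive block contributes no pages */
		return 0;
	return DIV_ROUND_UP(total_size, real_size_in_page);
}

int main(void)
{
	printf("%u\n", blk_pages(10000, 4096));	/* 3 */
	printf("%u\n", blk_pages(0, 0));	/* 0 */
	return 0;
}
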
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 758a8b4c0de8..c4e815f6cabd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* PF per protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT client configuration.
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.):
+ * one block for the connection context (CDUC), and for each task context
+ * two blocks, one for the regular task context and one for the force-load
+ * memory.
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_offset;
+ u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+ struct phys_mem_desc *dma_mem;
+ u32 num_pages;
+ u64 first_free;
+ u64 last_free;
+};
+
+struct qed_cxt_mngr {
+ /* Per protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+ u32 first_vf_in_pf;
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct phys_mem_desc *ilt_shadow;
+ u32 ilt_shadow_size;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_src_t2 src_t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ u8 task_type_id;
+ u16 task_ctx_size;
+ u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 859caa6c1a1f..f4eebaabb6d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
+#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
@@ -22,27 +23,28 @@ enum mem_groups {
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
- MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
+ MEM_GROUP_CAU_MEM_EXT,
MEM_GROUP_PXP_ILT,
- MEM_GROUP_TM_MEM,
- MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
- MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
MEM_GROUP_BMB_RAM,
MEM_GROUP_BMB_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
MEM_GROUPS_NUM
};
@@ -56,27 +58,28 @@ static const char * const s_mem_group_names[] = {
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
"IOR",
+ "RAM",
"BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
- "TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
+ "CAU_MEM_EXT",
"PXP_ILT",
- "TM_MEM",
- "SDM_MEM",
- "PBUF",
- "RAM",
"MULD_MEM",
"BTB_MEM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
"BMB_RAM",
"BMB_MEM",
+ "TM_MEM",
+ "TASK_CFC_MEM",
};
/* Idle check conditions */
@@ -170,35 +173,66 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond13,
};
+#define NUM_PHYS_BLOCKS 84
+
+#define NUM_DBG_RESET_REGS 8
+
/******************************* Data Types **********************************/
-enum platform_ids {
- PLATFORM_ASIC,
+enum hw_types {
+ HW_TYPE_ASIC,
PLATFORM_RESERVED,
PLATFORM_RESERVED2,
PLATFORM_RESERVED3,
- MAX_PLATFORM_IDS
+ PLATFORM_RESERVED4,
+ MAX_HW_TYPES
+};
+
+/* CM context types */
+enum cm_ctx_types {
+ CM_CTX_CONN_AG,
+ CM_CTX_CONN_ST,
+ CM_CTX_TASK_AG,
+ CM_CTX_TASK_ST,
+ NUM_CM_CTX_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
+ DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
+ DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dword, 3 HW dwords */
+ DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
+ DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
+ DBG_BUS_NUM_FRAME_MODES
};
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u32 num_ilt_pages;
};
-/* Platform constant definitions */
-struct platform_defs {
+/* HW type constant definitions */
+struct hw_type_defs {
const char *name;
u32 delay_factor;
u32 dmae_thresh;
u32 log_thresh;
};
+/* RBC reset definitions */
+struct rbc_reset_defs {
+ u32 reset_reg_addr;
+ u32 reset_val[MAX_CHIP_IDS];
+};
+
/* Storm constant definitions.
* Addresses are in bytes, sizes are in quad-regs.
*/
struct storm_defs {
char letter;
- enum block_id block_id;
+ enum block_id sem_block_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
bool has_vfc;
u32 sem_fast_mem_addr;
@@ -207,47 +241,26 @@ struct storm_defs {
u32 sem_slow_mode_addr;
u32 sem_slow_mode1_conf_addr;
u32 sem_sync_dbg_empty_addr;
- u32 sem_slow_dbg_empty_addr;
+ u32 sem_gpre_vect_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size;
- u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size;
- u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size;
- u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size;
- u32 cm_task_st_ctx_rd_addr;
+ u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
+ u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};
-/* Block constant definitions */
-struct block_defs {
- const char *name;
- bool exists[MAX_CHIP_IDS];
- bool associated_to_storm;
-
- /* Valid only if associated_to_storm is true */
- u32 storm_id;
- enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
- u32 dbg_select_addr;
- u32 dbg_enable_addr;
- u32 dbg_shift_addr;
- u32 dbg_force_valid_addr;
- u32 dbg_force_frame_addr;
- bool has_reset_bit;
-
- /* If true, block is taken out of reset before dump */
- bool unreset;
- enum dbg_reset_regs reset_reg;
-
- /* Bit offset in reset register */
- u8 reset_bit_offset;
+/* Debug Bus Constraint operation constant definitions */
+struct dbg_bus_constraint_op_defs {
+ u8 hw_op_val;
+ bool is_cyclic;
};
-/* Reset register definitions */
-struct reset_reg_defs {
- u32 addr;
+/* Storm Mode definitions */
+struct storm_mode_defs {
+ const char *name;
+ bool is_fast_dbg;
+ u8 id_in_hw;
+ u32 src_disable_reg_addr;
+ u32 src_enable_val;
bool exists[MAX_CHIP_IDS];
- u32 unreset_val[MAX_CHIP_IDS];
};
struct grc_param_defs {
@@ -257,7 +270,7 @@ struct grc_param_defs {
bool is_preset;
bool is_persistent;
u32 exclude_all_preset_val;
- u32 crash_preset_val;
+ u32 crash_preset_val[MAX_CHIP_IDS];
};
/* Address is in 128b units. Width is in bits. */
@@ -314,15 +327,7 @@ struct split_type_defs {
/******************************** Constants **********************************/
-#define MAX_LCIDS 320
-#define MAX_LTIDS 320
-
-#define NUM_IOR_SETS 2
-#define IORS_PER_SET 176
-#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
-
#define BYTES_IN_DWORD sizeof(u32)
-
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
@@ -348,20 +353,17 @@ struct split_type_defs {
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
-#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
- do { \
- for (i = 0; i < (arr_size); i++) \
- (arr)[i] = qed_rd(dev, ptt, addr); \
- } while (0)
-
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-/* Extra lines include a signature line + optional latency events line */
-#define NUM_EXTRA_DBG_LINES(block_desc) \
- (1 + ((block_desc)->has_latency_events ? 1 : 0))
-#define NUM_DBG_LINES(block_desc) \
- ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+/* extra lines include a signature line + optional latency events line */
+#define NUM_EXTRA_DBG_LINES(block) \
+ (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
+#define NUM_DBG_LINES(block) \
+ ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
+
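
NUM_EXTRA_DBG_LINES() now reads a flag out of the block's per-chip flags word instead of a dedicated struct field: every block gets one signature line, plus one latency-events line when the block advertises it. The equivalent check in plain C; the flag value here is a stand-in, not the real DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS encoding:

#include <stdio.h>

#define HAS_LATENCY_EVENTS 0x1	/* hypothetical flag bit */

static unsigned int num_extra_dbg_lines(unsigned int flags)
{
	return (flags & HAS_LATENCY_EVENTS) ? 2 : 1;
}

int main(void)
{
	printf("%u %u\n", num_extra_dbg_lines(0),
	       num_extra_dbg_lines(HAS_LATENCY_EVENTS));	/* 1 2 */
	return 0;
}
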
+#define USE_DMAE true
+#define PROTECT_WIDE_BUS true
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -380,6 +382,9 @@ struct split_type_defs {
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define PAGE_MEM_DESC_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -425,7 +430,9 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 8
+#define NUM_COMMON_GLOBAL_PARAMS 9
+
+#define MAX_RECURSION_DEPTH 10
#define FW_IMG_MAIN 1
@@ -449,1054 +456,121 @@ struct split_type_defs {
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MAX_SW_PLTAFORM_STR_SIZE 64
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb"},
- {"ah"},
- {"reserved"},
+ {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
+ {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
- TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- TCM_REG_CTX_RBC_ACCS,
- 4, TCM_REG_AGG_CON_CTX,
- 16, TCM_REG_SM_CON_CTX,
- 2, TCM_REG_AGG_TASK_CTX,
- 4, TCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
+ TCM_REG_CTX_RBC_ACCS,
+ {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
+ TCM_REG_SM_TASK_CTX},
+ {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
+ },
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCM}, false,
- MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- MCM_REG_CTX_RBC_ACCS,
- 1, MCM_REG_AGG_CON_CTX,
- 10, MCM_REG_SM_CON_CTX,
- 2, MCM_REG_AGG_TASK_CTX,
- 7, MCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2,
+ MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2,
+ MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY,
+ MSEM_REG_DBG_GPRE_VECT,
+ MCM_REG_CTX_RBC_ACCS,
+ {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
+ MCM_REG_SM_TASK_CTX},
+ {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
+ },
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
- USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
- UCM_REG_CTX_RBC_ACCS,
- 2, UCM_REG_AGG_CON_CTX,
- 13, UCM_REG_SM_CON_CTX,
- 3, UCM_REG_AGG_TASK_CTX,
- 3, UCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2,
+ USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2,
+ USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY,
+ USEM_REG_DBG_GPRE_VECT,
+ UCM_REG_CTX_RBC_ACCS,
+ {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
+ UCM_REG_SM_TASK_CTX},
+ {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
+ },
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
- XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- XCM_REG_CTX_RBC_ACCS,
- 9, XCM_REG_AGG_CON_CTX,
- 15, XCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2,
+ XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2,
+ XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY,
+ XSEM_REG_DBG_GPRE_VECT,
+ XCM_REG_CTX_RBC_ACCS,
+ {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
+ {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
+ },
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCY}, false,
- YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- YCM_REG_CTX_RBC_ACCS,
- 2, YCM_REG_AGG_CON_CTX,
- 3, YCM_REG_SM_CON_CTX,
- 2, YCM_REG_AGG_TASK_CTX,
- 12, YCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2,
+ YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2,
+ YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY,
+ YSEM_REG_DBG_GPRE_VECT,
+ YCM_REG_CTX_RBC_ACCS,
+ {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
+ YCM_REG_SM_TASK_CTX},
+ {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
+ },
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
- PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- PCM_REG_CTX_RBC_ACCS,
- 0, 0,
- 10, PCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0}
-};
-
-/* Block definitions array */
-
-static struct block_defs block_grc_defs = {
- "grc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
- GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
- GRC_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_UA, 1
-};
-
-static struct block_defs block_miscs_defs = {
- "miscs", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_defs = {
- "misc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbu_defs = {
- "dbu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pglue_b_defs = {
- "pglue_b",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
- PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
- PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
- PGLUE_B_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 1
-};
-
-static struct block_defs block_cnig_defs = {
- "cnig",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
- CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
- CNIG_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 0
-};
-
-static struct block_defs block_cpmu_defs = {
- "cpmu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 8
-};
-
-static struct block_defs block_ncsi_defs = {
- "ncsi",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
- NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
- NCSI_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 5
-};
-
-static struct block_defs block_opte_defs = {
- "opte", {true, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 4
-};
-
-static struct block_defs block_bmb_defs = {
- "bmb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
- BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
- BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
- BMB_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 7
-};
-
-static struct block_defs block_pcie_defs = {
- "pcie",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp_defs = {
- "mcp", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp2_defs = {
- "mcp2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
- MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
- MCP2_REG_DBG_FORCE_FRAME,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pswhst_defs = {
- "pswhst",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
- PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
- PSWHST_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswhst2_defs = {
- "pswhst2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
- PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
- PSWHST2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswrd_defs = {
- "pswrd",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
- PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
- PSWRD_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswrd2_defs = {
- "pswrd2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
- PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
- PSWRD2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswwr_defs = {
- "pswwr",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
- PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
- PSWWR_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswwr2_defs = {
- "pswwr2", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswrq_defs = {
- "pswrq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
- PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
- PSWRQ_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pswrq2_defs = {
- "pswrq2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
- PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
- PSWRQ2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pglcs_defs = {
- "pglcs",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
- PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
- PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 2
-};
-
-static struct block_defs block_ptu_defs = {
- "ptu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
- PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
- PTU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
-};
-
-static struct block_defs block_dmae_defs = {
- "dmae",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
- DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
- DMAE_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
-};
-
-static struct block_defs block_tcm_defs = {
- "tcm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
- TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
- TCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
-};
-
-static struct block_defs block_mcm_defs = {
- "mcm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
- MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
- MCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
-};
-
-static struct block_defs block_ucm_defs = {
- "ucm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
- UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
- UCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
-};
-
-static struct block_defs block_xcm_defs = {
- "xcm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
- XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
- XCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
-};
-
-static struct block_defs block_ycm_defs = {
- "ycm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
- YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
- YCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
-};
-
-static struct block_defs block_pcm_defs = {
- "pcm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
- PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
- PCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
-};
-
-static struct block_defs block_qm_defs = {
- "qm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
- QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
- QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
- QM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
-};
-
-static struct block_defs block_tm_defs = {
- "tm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
- TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
- TM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
-};
-
-static struct block_defs block_dorq_defs = {
- "dorq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
- DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
- DORQ_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
-};
-
-static struct block_defs block_brb_defs = {
- "brb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
- BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
- BRB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
-};
-
-static struct block_defs block_src_defs = {
- "src",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
- SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
- SRC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
-};
-
-static struct block_defs block_prs_defs = {
- "prs",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
- PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
- PRS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
-};
-
-static struct block_defs block_tsdm_defs = {
- "tsdm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
- TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
- TSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
-};
-
-static struct block_defs block_msdm_defs = {
- "msdm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
- MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
- MSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
-};
-
-static struct block_defs block_usdm_defs = {
- "usdm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
- USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
- USDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
-};
-
-static struct block_defs block_xsdm_defs = {
- "xsdm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
- XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
- XSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
-};
-
-static struct block_defs block_ysdm_defs = {
- "ysdm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
- YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
- YSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
-};
-
-static struct block_defs block_psdm_defs = {
- "psdm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
- PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
- PSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
-};
-
-static struct block_defs block_tsem_defs = {
- "tsem",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
- TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
- TSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
-};
-
-static struct block_defs block_msem_defs = {
- "msem",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
- MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
- MSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
-};
-
-static struct block_defs block_usem_defs = {
- "usem",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
- USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
- USEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
-};
-
-static struct block_defs block_xsem_defs = {
- "xsem",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
- XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
- XSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
-};
-
-static struct block_defs block_ysem_defs = {
- "ysem",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
- YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
- YSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
-};
-
-static struct block_defs block_psem_defs = {
- "psem",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
- PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
- PSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
-};
-
-static struct block_defs block_rss_defs = {
- "rss",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
- RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
- RSS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
-};
-
-static struct block_defs block_tmld_defs = {
- "tmld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
- TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
- TMLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
-};
-
-static struct block_defs block_muld_defs = {
- "muld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
- MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
- MULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
-};
-
-static struct block_defs block_yuld_defs = {
- "yuld",
- {true, true, false}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- MAX_DBG_BUS_CLIENTS},
- YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
- YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
- YULD_REG_DBG_FORCE_FRAME_BB_K2,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 15
-};
-
-static struct block_defs block_xyld_defs = {
- "xyld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
- XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
- XYLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
- PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
- PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
- PTLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 28
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
- YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
- YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
- YPLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 27
-};
-
-static struct block_defs block_prm_defs = {
- "prm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
- PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
- PRM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
-};
-
-static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
- PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
- PBF_PB1_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 11
-};
-
-static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
- PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
- PBF_PB2_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 12
-};
-
-static struct block_defs block_rpb_defs = {
- "rpb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
- RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
- RPB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
-};
-
-static struct block_defs block_btb_defs = {
- "btb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
- BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
- BTB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
-};
-
-static struct block_defs block_pbf_defs = {
- "pbf",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
- PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
- PBF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
-};
-
-static struct block_defs block_rdif_defs = {
- "rdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
- RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
- RDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
-};
-
-static struct block_defs block_tdif_defs = {
- "tdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
- TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
- TDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
-};
-
-static struct block_defs block_cdu_defs = {
- "cdu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
- CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
- CDU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
-};
-
-static struct block_defs block_ccfc_defs = {
- "ccfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
- CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
- CCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
-};
-
-static struct block_defs block_tcfc_defs = {
- "tcfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
- TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
- TCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
-};
-
-static struct block_defs block_igu_defs = {
- "igu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
- IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
- IGU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
-};
-
-static struct block_defs block_cau_defs = {
- "cau",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
- CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
- CAU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
-};
-
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
- RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
- RGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 30
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
- TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
- TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
- TGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 31
-};
-
-static struct block_defs block_umac_defs = {
- "umac",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
- UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
- UMAC_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 6
-};
-
-static struct block_defs block_xmac_defs = {
- "xmac", {true, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbg_defs = {
- "dbg", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
-};
-
-static struct block_defs block_nig_defs = {
- "nig",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
- NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
- NIG_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
-};
-
-static struct block_defs block_wol_defs = {
- "wol",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
- WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
- WOL_REG_DBG_FORCE_FRAME_K2_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
-};
-
-static struct block_defs block_bmbn_defs = {
- "bmbn",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
- BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
- BMBN_REG_DBG_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ipc_defs = {
- "ipc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 8
-};
-
-static struct block_defs block_nwm_defs = {
- "nwm",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
- NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
- NWM_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
-};
-
-static struct block_defs block_nws_defs = {
- "nws",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
- NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
- NWS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 12
-};
-
-static struct block_defs block_ms_defs = {
- "ms",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
- MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
- MS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 13
-};
-
-static struct block_defs block_phy_pcie_defs = {
- "phy_pcie",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_led_defs = {
- "led", {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 14
-};
-
-static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 11
-};
-
-static struct block_defs block_pxpreqbus_defs = {
- "pxpreqbus", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_bar0_map_defs = {
- "bar0_map", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
- &block_grc_defs,
- &block_miscs_defs,
- &block_misc_defs,
- &block_dbu_defs,
- &block_pglue_b_defs,
- &block_cnig_defs,
- &block_cpmu_defs,
- &block_ncsi_defs,
- &block_opte_defs,
- &block_bmb_defs,
- &block_pcie_defs,
- &block_mcp_defs,
- &block_mcp2_defs,
- &block_pswhst_defs,
- &block_pswhst2_defs,
- &block_pswrd_defs,
- &block_pswrd2_defs,
- &block_pswwr_defs,
- &block_pswwr2_defs,
- &block_pswrq_defs,
- &block_pswrq2_defs,
- &block_pglcs_defs,
- &block_dmae_defs,
- &block_ptu_defs,
- &block_tcm_defs,
- &block_mcm_defs,
- &block_ucm_defs,
- &block_xcm_defs,
- &block_ycm_defs,
- &block_pcm_defs,
- &block_qm_defs,
- &block_tm_defs,
- &block_dorq_defs,
- &block_brb_defs,
- &block_src_defs,
- &block_prs_defs,
- &block_tsdm_defs,
- &block_msdm_defs,
- &block_usdm_defs,
- &block_xsdm_defs,
- &block_ysdm_defs,
- &block_psdm_defs,
- &block_tsem_defs,
- &block_msem_defs,
- &block_usem_defs,
- &block_xsem_defs,
- &block_ysem_defs,
- &block_psem_defs,
- &block_rss_defs,
- &block_tmld_defs,
- &block_muld_defs,
- &block_yuld_defs,
- &block_xyld_defs,
- &block_ptld_defs,
- &block_ypld_defs,
- &block_prm_defs,
- &block_pbf_pb1_defs,
- &block_pbf_pb2_defs,
- &block_rpb_defs,
- &block_btb_defs,
- &block_pbf_defs,
- &block_rdif_defs,
- &block_tdif_defs,
- &block_cdu_defs,
- &block_ccfc_defs,
- &block_tcfc_defs,
- &block_igu_defs,
- &block_cau_defs,
- &block_rgfs_defs,
- &block_rgsrc_defs,
- &block_tgfs_defs,
- &block_tgsrc_defs,
- &block_umac_defs,
- &block_xmac_defs,
- &block_dbg_defs,
- &block_nig_defs,
- &block_wol_defs,
- &block_bmbn_defs,
- &block_ipc_defs,
- &block_nwm_defs,
- &block_nws_defs,
- &block_ms_defs,
- &block_phy_pcie_defs,
- &block_led_defs,
- &block_avs_wrap_defs,
- &block_pxpreqbus_defs,
- &block_misc_aeu_defs,
- &block_bar0_map_defs,
-};
-
-static struct platform_defs s_platform_defs[] = {
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2,
+ PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2,
+ PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY,
+ PSEM_REG_DBG_GPRE_VECT,
+ PCM_REG_CTX_RBC_ACCS,
+ {0, PCM_REG_SM_CON_CTX, 0, 0},
+ {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
+ },
+};
+
+static struct hw_type_defs s_hw_type_defs[] = {
+ /* HW_TYPE_ASIC */
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
@@ -1505,146 +579,159 @@ static struct platform_defs s_platform_defs[] = {
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
- /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
- {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
+ /* DBG_GRC_PARAM_DUMP_DORQ */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ /* DBG_GRC_PARAM_RESERVED1 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
- MAX_LCIDS, MAX_LCIDS},
+ /* DBG_GRC_PARAM_RESERVED2 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
- MAX_LTIDS, MAX_LTIDS},
+ /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
+ {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0, 0}, 0, 1, false, false, 1, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0, 0}, 0, 1, false, false, 0, 0}
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED3 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
+ {{0, 1}, 0, 1, false, false, 0, {0, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_CAU_EXT */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
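
For readers decoding the initializer columns above: a sketch of the struct presumably behind s_grc_param_defs, with field meanings inferred from how the values are used in this patch (the names are assumptions, not copied from the driver headers). Each {x, y} pair is a per-chip {bb, k2} value.

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t u32;
	#define MAX_CHIP_IDS 2				/* bb, k2 */

	struct grc_param_defs_sketch {			/* assumed field meanings */
		u32 default_val[MAX_CHIP_IDS];		/* first column: per-chip default */
		u32 min;				/* lowest legal value */
		u32 max;				/* highest legal value */
		bool is_preset;				/* the param is itself a preset */
		bool is_persistent;			/* kept when presets are applied */
		u32 exclude_all_preset_val;		/* value under EXCLUDE_ALL */
		u32 crash_preset_val[MAX_CHIP_IDS];	/* last column: value under CRASH */
	};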
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0, 32,
- {256, 320, 512} },
+ {"rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320}},
- { "rss_mem_key_msb", "rss_key", 1024, 256,
- {128, 208, 257} },
+ {"rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208}},
- { "rss_mem_key_lsb", "rss_key", 2048, 64,
- {128, 208, 257} },
+ {"rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208}},
- { "rss_mem_info", "rss_info", 3072, 16,
- {128, 208, 256} },
+ {"rss_mem_info", "rss_info", 3072, 16,
+ {128, 208}},
- { "rss_mem_ind", "rss_ind", 4096, 16,
- {16384, 26624, 32768} }
+ {"rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624}}
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1655,54 +742,31 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
};
static struct big_ram_defs s_big_ram_defs[] = {
- { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
- BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 0, 0},
- {153600, 180224, 282624} },
-
- { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
- BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 1, 1},
- {92160, 117760, 168960} },
-
- { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
- BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
- {36864, 36864, 36864} }
-};
-
-static struct reset_reg_defs s_reset_regs_defs[] = {
- /* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x400, 0x600} },
-
- /* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2_E5,
- {false, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x0, 0x0} },
+ {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 0},
+ {153600, 180224}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1,
- {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
+ {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 1},
+ {92160, 117760}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2,
- {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
+ {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ MISCS_REG_BLOCK_256B_EN, {0, 0},
+ {36864, 36864}}
+};
- /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX,
- {true, true, true}, {0x2, 0x2, 0x2} },
+static struct rbc_reset_defs s_rbc_reset_defs[] = {
+ {MISCS_REG_RESET_PL_HV,
+ {0x0, 0x400}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {0x4404040, 0x4404040}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {0x7, 0x7c00007}},
+ {MISC_REG_RESET_PL_PDA_VAUX,
+ {0x2, 0x2}},
};
static struct phy_defs s_phy_defs[] = {
@@ -1785,9 +849,19 @@ static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
}
}
+/* Sets pointer and size for the specified binary buffer type */
+static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
+ enum bin_dbg_buffer_type buf_type,
+ const u32 *ptr, u32 size)
+{
+ struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
+
+ buf->ptr = (void *)ptr;
+ buf->size = size;
+}
+
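
A minimal standalone sketch of the registration pattern qed_set_dbg_bin_buf() implements, with the driver types reduced to stand-ins (names and the init flow here are illustrative only, not the driver's actual init path):

	#include <stdio.h>
	#include <stdint.h>

	struct virt_mem_desc {			/* stand-in for the driver type */
		void *ptr;
		uint32_t size;			/* bytes */
	};

	enum bin_buf_type { BUF_DBG_BLOCKS, NUM_BIN_BUFS };

	static struct virt_mem_desc dbg_arrays[NUM_BIN_BUFS];

	static void set_dbg_bin_buf(enum bin_buf_type type,
				    const uint32_t *ptr, uint32_t size)
	{
		/* Same cast away from const as the driver helper above */
		dbg_arrays[type].ptr = (void *)ptr;
		dbg_arrays[type].size = size;
	}

	int main(void)
	{
		static const uint32_t blocks_blob[] = { 0x1, 0x2, 0x3 };

		set_dbg_bin_buf(BUF_DBG_BLOCKS, blocks_blob, sizeof(blocks_blob));
		printf("blocks buf: %u bytes\n", dbg_arrays[BUF_DBG_BLOCKS].size);
		return 0;
	}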
/* Initializes debug data for the specified device */
-static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 num_pfs = 0, max_pfs_per_port = 0;
@@ -1812,26 +886,25 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_UNKNOWN_CHIP;
}
- /* Set platofrm */
- dev_data->platform_id = PLATFORM_ASIC;
+ /* Set HW type */
+ dev_data->hw_type = HW_TYPE_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
/* Set port mode */
- switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
- case 0:
+ switch (p_hwfn->cdev->num_ports_in_engine) {
+ case 1:
dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
break;
- case 1:
+ case 2:
dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
break;
- case 2:
+ case 4:
dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
break;
}
/* Set 100G mode */
- if (dev_data->chip_id == CHIP_BB &&
- qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
+ if (QED_IS_CMT(p_hwfn->cdev))
dev_data->mode_enable[MODE_100G] = 1;
/* Set number of ports */
@@ -1857,14 +930,36 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
-static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
- enum block_id block_id)
+static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block *dbg_block;
+
+ dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
+ return dbg_block + block_id;
+}
+
+static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
+ *p_hwfn,
+ enum block_id
+ block_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
- MAX_CHIP_IDS +
- dev_data->chip_id];
+ return (const struct dbg_block_chip *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
+ block_id * MAX_CHIP_IDS + dev_data->chip_id;
+}
+
+static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
+ *p_hwfn,
+ u8 reset_reg_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (const struct dbg_reset_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
+ reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
}
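
Both lookup helpers above decode the same flat layout: the firmware-provided arrays are stored as [entry][chip], so an element lives at base + id * MAX_CHIP_IDS + chip_id. A tiny self-contained illustration of that indexing (values invented):

	#include <stdio.h>

	#define MAX_CHIP_IDS 2	/* bb, k2 */

	int main(void)
	{
		/* Flat [block][chip] table: row i holds both chips' values */
		int table[] = {
			100, 101,	/* block 0: bb, k2 */
			200, 201,	/* block 1: bb, k2 */
		};
		int block_id = 1, chip_id = 0;

		printf("%d\n", table[block_id * MAX_CHIP_IDS + chip_id]); /* 200 */
		return 0;
	}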
/* Reads the FW info structure for the specified Storm from the chip,
@@ -1885,8 +980,9 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
- sizeof(fw_info_location);
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+
dest = (u32 *)&fw_info_location;
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
@@ -2081,6 +1177,29 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
+/* Reads the chip revision from the chip and writes it as a param to the
+ * specified buffer. Returns the dumped size in dwords.
+ */
+static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char param_str[3] = "??";
+
+ if (dev_data->hw_type == HW_TYPE_ASIC) {
+ u32 chip_rev, chip_metal;
+
+ chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+
+ param_str[0] = 'a' + (u8)chip_rev;
+ param_str[1] = '0' + (u8)chip_metal;
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
+}
+
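
The revision string is built by offsetting ASCII 'a' with the revision register and ASCII '0' with the metal register, so rev=1, metal=0 dumps as "b0". A standalone sketch with the register reads replaced by constants:

	#include <stdio.h>

	int main(void)
	{
		unsigned chip_rev = 1, chip_metal = 0;	/* stand-ins for MISCS_REG_* reads */
		char param_str[3] = "??";

		param_str[0] = 'a' + (char)chip_rev;
		param_str[1] = '0' + (char)chip_metal;
		printf("chip-revision: %s\n", param_str);	/* prints "b0" */
		return 0;
	}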
/* Writes a section header to the specified buffer.
* Returns the dumped size in dwords.
*/
@@ -2104,7 +1223,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
u8 num_params;
/* Dump global params section header */
- num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
+ (dev_data->chip_id == CHIP_BB ? 1 : 0);
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -2112,6 +1232,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
offset += qed_dump_mfw_ver_param(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_chip_revision_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
offset += qed_dump_num_param(dump_buf + offset,
dump, "tools-version", TOOLS_VERSION);
offset += qed_dump_str_param(dump_buf + offset,
@@ -2121,11 +1243,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump,
"platform",
- s_platform_defs[dev_data->platform_id].
- name);
- offset +=
- qed_dump_num_param(dump_buf + offset, dump, "pci-func",
- p_hwfn->abs_pf_id);
+ s_hw_type_defs[dev_data->hw_type].name);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "pci-func", p_hwfn->abs_pf_id);
+ if (dev_data->chip_id == CHIP_BB)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "path", QED_PATH_ID(p_hwfn));
return offset;
}
@@ -2156,24 +1279,87 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 rst_reg_id;
+ u32 blk_id;
/* Read reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++)
- if (s_reset_regs_defs[i].exists[dev_data->chip_id])
- reg_val[i] = qed_rd(p_hwfn,
- p_ptt, s_reset_regs_defs[i].addr);
+ for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
+ const struct dbg_reset_reg *rst_reg;
+ bool rst_reg_removed;
+ u32 rst_reg_addr;
+
+ rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
+ rst_reg_removed = GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_IS_REMOVED);
+ rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_ADDR));
+
+ if (!rst_reg_removed)
+ reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
+ rst_reg_addr);
+ }
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++) {
- struct block_defs *block = s_block_defs[i];
+ for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
+ const struct dbg_block_chip *blk;
+ bool has_rst_reg;
+ bool is_removed;
+
+ blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
+ is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_rst_reg = GET_FIELD(blk->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+
+ if (!is_removed && has_rst_reg)
+ dev_data->block_in_reset[blk_id] =
+ !(reg_val[blk->reset_reg_id] &
+ BIT(blk->reset_reg_bit_offset));
+ }
+}
+
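
The reset-state scan relies on qed's name##_MASK / name##_SHIFT field macros. A self-contained sketch of the GET_FIELD idiom; the macro body below follows the usual qed pattern but is reproduced from memory, not copied from the headers:

	#include <stdio.h>
	#include <stdint.h>

	/* name##_MASK / name##_SHIFT pairs, demo field layout */
	#define DEMO_ADDR_MASK		0xffffff
	#define DEMO_ADDR_SHIFT		0
	#define DEMO_IS_REMOVED_MASK	0x1
	#define DEMO_IS_REMOVED_SHIFT	24

	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)

	int main(void)
	{
		uint32_t data = (1u << DEMO_IS_REMOVED_SHIFT) | 0x1234;

		printf("addr=0x%x removed=%u\n",
		       GET_FIELD(data, DEMO_ADDR), GET_FIELD(data, DEMO_IS_REMOVED));
		return 0;	/* prints "addr=0x1234 removed=1" */
	}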
+/* is_mode_match recursive function */
+static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
+ u16 *modes_buf_offset, u8 rec_depth)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 *dbg_array;
+ bool arg1, arg2;
+ u8 tree_val;
+
+ if (rec_depth > MAX_RECURSION_DEPTH) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
+ return false;
+ }
- dev_data->block_in_reset[i] = block->has_reset_bit &&
- !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ /* Get next element from modes tree buffer */
+ dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = dbg_array[(*modes_buf_offset)++];
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ arg2 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
}
}
+/* Returns true if the mode (specified using modes_buf_offset) is enabled */
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
+}
+
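
The recursion above evaluates a prefix-encoded boolean expression: each byte of the modes-tree buffer is either an operator whose operands follow it, or a mode index (offset by the operator count) tested against mode_enable[]. A standalone evaluator over the same encoding, with demo opcode values:

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	enum { OP_NOT, OP_OR, OP_AND, MAX_OPS };	/* demo opcodes */

	static bool mode_enable[] = { true, false, true };	/* modes 0..2 */

	static bool eval(const uint8_t *tree, uint16_t *off)
	{
		uint8_t v = tree[(*off)++];
		bool a, b;

		switch (v) {
		case OP_NOT:
			return !eval(tree, off);
		case OP_OR:
		case OP_AND:
			a = eval(tree, off);
			b = eval(tree, off);
			return v == OP_OR ? (a || b) : (a && b);
		default:
			return mode_enable[v - MAX_OPS];	/* leaf: mode index */
		}
	}

	int main(void)
	{
		/* AND(mode0, NOT(mode1)) == true && !false */
		const uint8_t tree[] = { OP_AND, MAX_OPS + 0, OP_NOT, MAX_OPS + 1 };
		uint16_t off = 0;

		printf("%d\n", eval(tree, &off));	/* prints 1 */
		return 0;
	}

Prefix encoding keeps the buffer self-delimiting: no lengths or parentheses are needed, and the recursion-depth guard above bounds the damage from a corrupt buffer.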
/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -2185,23 +1371,21 @@ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
- struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
+ u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ const struct dbg_reset_reg *reset_reg;
+ const struct dbg_block_chip *block;
- dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
- old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val =
- old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
+ block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
+ reset_reg_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
-}
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
-static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum dbg_bus_frame_modes mode)
-{
- qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}
/* Enable / disable Debug Bus clients according to the specified mask
@@ -2213,28 +1397,65 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
-static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
+{
+ const struct dbg_block_chip *block =
+ qed_get_dbg_block_per_chip(p_hwfn, block_id);
+
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
+ line_id);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
+ enable_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
+ right_shift);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
+ force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
+ force_frame_mask);
+}
+
+/* Disable debug bus in all blocks */
+static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- bool arg1, arg2;
- const u32 *ptr;
- u8 tree_val;
+ u32 block_id;
- /* Get next element from modes tree buffer */
- ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
- tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ /* Disable all blocks */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_block_chip *block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)block_id);
- switch (tree_val) {
- case INIT_MODE_OP_NOT:
- return !qed_is_mode_match(p_hwfn, modes_buf_offset);
- case INIT_MODE_OP_OR:
- case INIT_MODE_OP_AND:
- arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
- arg2) : (arg1 && arg2);
- default:
- return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED) ||
+ dev_data->block_in_reset[block_id])
+ continue;
+
+ /* Disable debug bus */
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
+ u32 dbg_en_addr =
+ block_per_chip->dbg_dword_enable_reg_addr;
+ u16 modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ bool eval_mode =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ qed_wr(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(dbg_en_addr),
+ 0);
+ }
}
}
@@ -2247,6 +1468,20 @@ static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
+/* Returns the storm_id that matches the specified Storm letter,
+ * or MAX_DBG_STORMS if invalid storm letter.
+ */
+static enum dbg_storms qed_get_id_from_letter(char storm_letter)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
+ if (s_storm_defs[storm_id].letter == storm_letter)
+ return (enum dbg_storms)storm_id;
+
+ return MAX_DBG_STORMS;
+}
+
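
qed_get_id_from_letter() is a plain linear scan over the Storm letters. A standalone sketch, assuming the six letters t/m/u/x/y/p that match the Storm names used elsewhere in this file:

	#include <stdio.h>

	#define MAX_DBG_STORMS 6

	static const char storm_letters[MAX_DBG_STORMS] = {
		't', 'm', 'u', 'x', 'y', 'p'
	};

	static int id_from_letter(char letter)
	{
		int i;

		for (i = 0; i < MAX_DBG_STORMS; i++)
			if (storm_letters[i] == letter)
				return i;
		return MAX_DBG_STORMS;		/* invalid letter */
	}

	int main(void)
	{
		printf("%d %d\n", id_from_letter('x'), id_from_letter('q'));
		return 0;	/* prints "3 6" */
	}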
/* Returns true if the specified Storm should be included in the dump, false
* otherwise.
*/
@@ -2262,14 +1497,20 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
- struct block_defs *block = s_block_defs[block_id];
+ const struct dbg_block *block;
u8 i;
- /* Check Storm match */
- if (block->associated_to_storm &&
- !qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)block->storm_id))
- return false;
+ block = get_dbg_block(p_hwfn, block_id);
+
+ /* If the block is associated with a Storm, check Storm match */
+ if (block->associated_storm_letter) {
+ enum dbg_storms associated_storm_id =
+ qed_get_id_from_letter(block->associated_storm_letter);
+
+ if (associated_storm_id == MAX_DBG_STORMS ||
+ !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
+ return false;
+ }
for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
struct big_ram_defs *big_ram = &s_big_ram_defs[i];
@@ -2291,6 +1532,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CAU_SB:
case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ case MEM_GROUP_CAU_MEM_EXT:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
case MEM_GROUP_CFC_MEM:
@@ -2298,6 +1541,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
+ case MEM_GROUP_DORQ_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2343,64 +1588,104 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
msleep(STALL_DELAY_MS);
}
-/* Takes all blocks out of reset */
+/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
+ * taken out of reset.
+ */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+ struct qed_ptt *p_ptt, bool rbc_only)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 block_id, i;
+ u8 chip_id = dev_data->chip_id;
+ u32 i;
- /* Fill reset regs values */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
+ /* Take RBCs out of reset */
+ for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
+ if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_rbc_reset_defs[i].reset_reg_addr +
+ RESET_REG_UNRESET_OFFSET,
+ s_rbc_reset_defs[i].reset_val[chip_id]);
- if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
- block->unreset)
- reg_val[block->reset_reg] |=
- BIT(block->reset_bit_offset);
- }
+ if (!rbc_only) {
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 reset_reg_id;
+ u32 block_id;
- /* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
- continue;
+ /* Fill reset regs values */
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ bool is_removed, has_reset_reg, unreset_before_dump;
+ const struct dbg_block_chip *block;
+
+ block = qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)
+ block_id);
+ is_removed =
+ GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_reset_reg =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+ unreset_before_dump =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
+
+ if (!is_removed && has_reset_reg && unreset_before_dump)
+ reg_val[block->reset_reg_id] |=
+ BIT(block->reset_reg_bit_offset);
+ }
- reg_val[i] |=
- s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
+ /* Write reset registers */
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD
+ (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
+ continue;
+
+ if (reg_val[reset_reg_id]) {
+ reset_reg_addr =
+ GET_FIELD(reset_reg->data,
+ DBG_RESET_REG_ADDR);
+ qed_wr(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(reset_reg_addr) +
+ RESET_REG_UNRESET_OFFSET,
+ reg_val[reset_reg_id]);
+ }
+ }
}
}
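
Note the un-reset writes above go to reset_reg_addr + RESET_REG_UNRESET_OFFSET rather than doing a read-modify-write. A plausible reading (an assumption based on the offset's name, not confirmed by this patch) is that the hardware exposes set/clear companion registers, so a single write deasserts exactly the requested bits. A mock illustrating that semantics:

	#include <stdio.h>
	#include <stdint.h>

	#define RESET_REG		0x1000u
	#define UNRESET_OFFSET		0x10u

	static uint32_t reset_state = 0xff;	/* all eight blocks start in reset */

	/* Mock MMIO write modelling the assumed set/clear register pair */
	static void mock_wr(uint32_t addr, uint32_t val)
	{
		if (addr == RESET_REG)
			reset_state |= val;	/* assert the given reset bits */
		else if (addr == RESET_REG + UNRESET_OFFSET)
			reset_state &= ~val;	/* deassert the given reset bits */
	}

	int main(void)
	{
		/* One write takes blocks 3 and 7 out of reset; others untouched */
		mock_wr(RESET_REG + UNRESET_OFFSET, (1u << 3) | (1u << 7));
		printf("reset_state=0x%02x\n", reset_state);	/* 0x77 */
		return 0;
	}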
/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
-qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type)
{
const struct dbg_attn_block *base_attn_block_arr =
- (const struct dbg_attn_block *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+ (const struct dbg_attn_block *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
/* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
-qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type,
u8 *num_attn_regs)
{
const struct dbg_attn_block_type_data *block_type_data =
- qed_get_block_attn_data(block_id, attn_type);
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
- return &((const struct dbg_attn_reg *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
- regs_offset];
+ return (const struct dbg_attn_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
+ block_type_data->regs_offset;
}
/* For each block, clear the status of all parities */
@@ -2412,11 +1697,12 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
u8 reg_idx, num_attn_regs;
u32 block_id;
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2444,22 +1730,20 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
}
/* Dumps GRC registers section header. Returns the dumped size in dwords.
- * The following parameters are dumped:
+ * the following parameters are dumped:
* - count: no. of dumped entries
* - split_type: split type
* - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
- * - param_name: user parameter value (dumped only if param_name != NULL
- * and param_val != NULL).
+ * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
u32 num_reg_entries,
enum init_split_types split_type,
- u8 split_id,
- const char *param_name, const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
u8 num_params = 2 +
- (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
+ (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
u32 offset = 0;
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -2472,9 +1756,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
if (split_type != SPLIT_TYPE_NONE)
offset += qed_dump_num_param(dump_buf + offset,
dump, "id", split_id);
- if (param_name && param_val)
+ if (reg_type_name)
offset += qed_dump_str_param(dump_buf + offset,
- dump, param_name, param_val);
+ dump, "type", reg_type_name);
return offset;
}
@@ -2504,21 +1788,12 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
+ bool read_using_dmae = false;
+ u32 thresh;
if (!dump)
return len;
- /* Print log if needed */
- dev_data->num_regs_read += len;
- if (dev_data->num_regs_read >=
- s_platform_defs[dev_data->platform_id].log_thresh) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers...\n",
- dev_data->num_regs_read);
- dev_data->num_regs_read = 0;
- }
-
switch (split_type) {
case SPLIT_TYPE_PORT:
port_id = split_id;
@@ -2539,38 +1814,77 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
}
/* Try reading using DMAE */
- if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
- (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
- wide_bus)) {
- if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
- (u64)(uintptr_t)(dump_buf), len, NULL))
- return len;
- dev_data->use_dmae = 0;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Failed reading from chip using DMAE, using GRC instead\n");
+ if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
+ (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
+ (PROTECT_WIDE_BUS && wide_bus))) {
+ struct qed_dmae_params dmae_params;
+
+ /* Set DMAE params */
+ memset(&dmae_params, 0, sizeof(dmae_params));
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ dmae_params.port_id = port_id;
+ break;
+ case SPLIT_TYPE_PF:
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.src_pfid = pf_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.port_id = port_id;
+ dmae_params.src_pfid = pf_id;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute DMAE command */
+ read_using_dmae = !qed_dmae_grc2host(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf),
+ len, &dmae_params);
+ if (!read_using_dmae) {
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
}
+ if (read_using_dmae)
+ goto print_log;
+
/* If not read using DMAE, read using GRC */
/* Set pretend */
- if (split_type != dev_data->pretend.split_type || split_id !=
- dev_data->pretend.split_id) {
+ if (split_type != dev_data->pretend.split_type ||
+ split_id != dev_data->pretend.split_id) {
switch (split_type) {
case SPLIT_TYPE_PORT:
qed_port_pretend(p_hwfn, p_ptt, port_id);
break;
case SPLIT_TYPE_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
case SPLIT_TYPE_PORT_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
break;
case SPLIT_TYPE_VF:
- fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
- (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
+ | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
+ vf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
default:
@@ -2584,6 +1898,16 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
/* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+print_log:
+ /* Print log */
+ dev_data->num_regs_read += len;
+ thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
+ if ((dev_data->num_regs_read / thresh) >
+ ((dev_data->num_regs_read - len) / thresh))
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumped %d registers...\n", dev_data->num_regs_read);
+
return len;
}
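
The print_log path above replaces the old accumulate-and-reset counter: comparing num_regs_read / thresh before and after adding len makes the message fire exactly once per threshold multiple crossed, with no counter reset. A standalone check of that property:

	#include <stdio.h>

	int main(void)
	{
		unsigned thresh = 1024, total = 0;
		unsigned lens[] = { 600, 600, 600, 600 };	/* reads of 600 regs each */
		unsigned i;

		for (i = 0; i < 4; i++) {
			total += lens[i];
			if (total / thresh > (total - lens[i]) / thresh)
				printf("dumped %u registers...\n", total);
		}
		/* prints at total=1200 and total=2400: once per 1024 crossed */
		return 0;
	}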
@@ -2668,7 +1992,7 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
enum init_split_types split_type,
@@ -2681,10 +2005,10 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
*num_dumped_reg_entries = 0;
- while (input_offset < input_regs_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
- &input_regs_arr.ptr[input_offset++];
+ input_regs_arr.ptr + input_offset++;
u16 modes_buf_offset;
bool eval_mode;
@@ -2707,7 +2031,7 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
+ input_regs_arr.ptr + input_offset;
u32 addr, len;
bool wide_bus;
@@ -2732,14 +2056,12 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
enum init_split_types split_type,
- u8 split_id,
- const char *param_name,
- const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum init_split_types hdr_split_type = split_type;
@@ -2757,7 +2079,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
false,
0,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
/* Dump registers */
offset += qed_grc_dump_regs_entries(p_hwfn,
@@ -2776,7 +2098,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
dump,
num_dumped_reg_entries,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
return num_dumped_reg_entries > 0 ? offset : 0;
}
@@ -2789,32 +2111,33 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
- const char *param_name, const char *param_val)
+ const char *reg_type_name)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0, input_offset = 0;
- u16 fid;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_regs_arr;
+ struct virt_mem_desc curr_input_regs_arr;
enum init_split_types split_type;
u16 split_count = 0;
u32 split_data_size;
u8 split_id;
split_hdr =
- (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ (const struct dbg_dump_split_hdr *)
+ dbg_buf->ptr + input_offset++;
split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
curr_input_regs_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
- curr_input_regs_arr.size_in_dwords = split_data_size;
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
+ input_offset;
+ curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
switch (split_type) {
case SPLIT_TYPE_NONE:
@@ -2842,16 +2165,16 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
dump, block_enable,
split_type,
split_id,
- param_name,
- param_val);
+ reg_type_name);
input_offset += split_data_size;
}
/* Cancel pretends (pretend to original PF) */
if (dump) {
- fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
- qed_fid_pretend(p_hwfn, p_ptt, fid);
+ qed_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ p_hwfn->rel_pf_id));
dev_data->pretend.split_type = SPLIT_TYPE_NONE;
dev_data->pretend.split_id = 0;
}
@@ -2864,26 +2187,32 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i, offset = 0, num_regs = 0;
+ u32 offset = 0, num_regs = 0;
+ u8 reset_reg_id;
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0,
- SPLIT_TYPE_NONE, 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
/* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
+
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
continue;
+ reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs[i].addr), 1,
- false, SPLIT_TYPE_NONE, 0);
+ reset_reg_addr,
+ 1, false, SPLIT_TYPE_NONE, 0);
num_regs++;
}
@@ -2891,7 +2220,7 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ 0, "RESET_REGS");
return offset;
}
@@ -2904,21 +2233,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, offset = 0, num_reg_entries = 0;
+ u32 block_id, offset = 0, stall_regs_offset;
const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
+ u32 num_reg_entries = 0;
- /* Calculate header size */
+ /* Write empty header for attention registers */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
/* Write parity registers */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id] && dump)
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2961,16 +2292,29 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
}
}
+ /* Overwrite header for attention registers */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+ /* Write empty header for stall registers */
+ stall_regs_offset = offset;
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, SPLIT_TYPE_NONE, 0, "REGS");
+
/* Write Storm stall status registers */
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
+ storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[storm->block_id] && dump)
+ if (dev_data->block_in_reset[storm->sem_block_id] && dump)
continue;
addr =
- BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -2982,12 +2326,12 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
num_reg_entries++;
}
- /* Write header */
+ /* Overwrite header for stall registers */
if (dump)
- qed_grc_dump_regs_hdr(dump_buf,
+ qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
true,
- num_reg_entries, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "REGS");
return offset;
}
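
Both register sections in this function use the same two-pass header trick: emit the header with dump disabled to reserve its space, write and count the entries, then overwrite the header in place with the final count. A compact standalone sketch of the pattern:

	#include <stdio.h>

	static unsigned write_hdr(unsigned *buf, int commit, unsigned count)
	{
		if (commit)
			buf[0] = count;	/* real header: entry count */
		return 1;		/* header size in dwords */
	}

	int main(void)
	{
		unsigned buf[16], offset = 0, entries = 0, hdr_at = offset;

		offset += write_hdr(buf + hdr_at, 0, 0);	/* reserve space only */
		for (; entries < 3; entries++)
			buf[offset++] = 0x1000 + entries;	/* dump entries */
		write_hdr(buf + hdr_at, 1, entries);		/* overwrite header */

		printf("count=%u size=%u dwords\n", buf[hdr_at], offset);
		return 0;	/* prints "count=3 size=4 dwords" */
	}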
@@ -3000,8 +2344,7 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
u32 offset = 0, addr;
offset += qed_grc_dump_regs_hdr(dump_buf,
- dump, 2, SPLIT_TYPE_NONE, 0,
- NULL, NULL);
+ dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
* skipped).
@@ -3049,8 +2392,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 len,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u8 num_params = 3;
u32 offset = 0;
@@ -3071,7 +2413,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
if (name) {
/* Dump name */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), name);
@@ -3103,7 +2445,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
dump, "packed", 1);
/* Dump reg type */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), mem_group);
@@ -3130,8 +2472,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
bool wide_bus,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u32 offset = 0;
@@ -3142,8 +2483,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
addr,
len,
bit_width,
- packed,
- mem_group, is_storm, storm_letter);
+ packed, mem_group, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
@@ -3156,20 +2496,21 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
/* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_mems_arr,
+ struct virt_mem_desc input_mems_arr,
u32 *dump_buf, bool dump)
{
u32 i, offset = 0, input_offset = 0;
bool mode_match = true;
- while (input_offset < input_mems_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr;
u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
- cond_hdr = (const struct dbg_dump_cond_hdr *)
- &input_mems_arr.ptr[input_offset++];
+ cond_hdr =
+ (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
+ input_offset++;
num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
@@ -3191,24 +2532,25 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
- (const struct dbg_dump_mem *)
- &input_mems_arr.ptr[input_offset];
- u8 mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
- bool is_storm = false, mem_wide_bus;
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- enum block_id block_id;
+ (const struct dbg_dump_mem *)((u32 *)
+ input_mems_arr.ptr
+ + input_offset);
+ const struct dbg_block *block;
+ char storm_letter = 0;
u32 mem_addr, mem_len;
+ bool mem_wide_bus;
+ u8 mem_group_id;
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- block_id = (enum block_id)cond_hdr->block_id;
if (!qed_grc_is_mem_included(p_hwfn,
- block_id,
+ (enum block_id)
+ cond_hdr->block_id,
mem_group_id))
continue;
@@ -3217,42 +2559,14 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
mem_wide_bus = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_WIDE_BUS);
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LTIDS);
- }
+ block = get_dbg_block(p_hwfn,
+ cond_hdr->block_id);
- /* If memory is associated with Storm, update Storm
- * details.
+ /* If memory is associated with Storm,
+			 * update Storm details.
*/
- if (s_block_defs
- [cond_hdr->block_id]->associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs
- [cond_hdr->block_id]->
- storm_id].letter;
- }
+ if (block->associated_storm_letter)
+ storm_letter = block->associated_storm_letter;
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3266,7 +2580,6 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
0,
false,
s_mem_group_names[mem_group_id],
- is_storm,
storm_letter);
}
}
@@ -3281,26 +2594,25 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
u32 offset = 0, input_offset = 0;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_mems_arr;
+ struct virt_mem_desc curr_input_mems_arr;
enum init_split_types split_type;
u32 split_data_size;
- split_hdr = (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- curr_input_mems_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
- curr_input_mems_arr.size_in_dwords = split_data_size;
+ split_hdr =
+ (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
+ input_offset++;
+ split_type = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
+ curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
if (split_type == SPLIT_TYPE_NONE)
offset += qed_grc_dump_mem_entries(p_hwfn,
@@ -3328,17 +2640,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
bool dump,
const char *name,
u32 num_lids,
- u32 lid_size,
- u32 rd_reg_addr,
- u8 storm_id)
+ enum cm_ctx_types ctx_type, u8 storm_id)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
struct storm_defs *storm = &s_storm_defs[storm_id];
- u32 i, lid, total_size, offset = 0;
+ u32 i, lid, lid_size, total_size;
+ u32 rd_reg_addr, offset = 0;
+
+ /* Convert quad-regs to dwords */
+ lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
if (!lid_size)
return 0;
- lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3348,18 +2662,26 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
0,
total_size,
lid_size * 32,
- false, name, true, storm->letter);
+ false, name, storm->letter);
if (!dump)
return offset + total_size;
+ rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
+
/* Dump context data */
for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
+ for (i = 0; i < lid_size; i++) {
qed_wr(p_hwfn,
p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt, rd_reg_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rd_reg_addr,
+ 1,
+ false,
+ SPLIT_TYPE_NONE, 0);
}
}
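
To make the addressing concrete: LID sizes are stored in quad-regs (128 bits), so the multiplication by 4 above converts to dwords, and each write of (i << 9) | lid selects dword i of LID lid before the single-dword read-back. A worked sketch with assumed values:

	/* lid_size of 2 quad-regs -> 2 * 4 = 8 dwords per LID */

	/* Select dword 3 of LID 0x15, then read that dword back */
	qed_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (3 << 9) | 0x15);
	val = qed_rd(p_hwfn, p_ptt, storm->cm_ctx_rd_addr[ctx_type]);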
@@ -3370,115 +2692,126 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
-
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_ag_ctx_lid_size,
- storm->cm_conn_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_AG, storm_id);
/* Dump Conn ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_st_ctx_lid_size,
- storm->cm_conn_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_ST, storm_id);
/* Dump Task AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_ag_ctx_lid_size,
- storm->cm_task_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_AG, storm_id);
/* Dump Task ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_st_ctx_lid_size,
- storm->cm_task_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_ST, storm_id);
}
return offset;
}
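
The four near-identical call sites collapse because the per-type LID sizes and read addresses moved out of individual storm_defs fields into tables indexed by enum cm_ctx_types (and, for sizes, by chip). The relevant fields presumably look roughly like this (inferred from the usage in qed_grc_dump_ctx_data(); the array bound names are assumptions):

	struct storm_defs {
		/* ... */
		u32 cm_ctx_wr_addr;
		u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
		u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
	};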
-/* Dumps GRC IORs data. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
-{
- char buf[10] = "IOR_SET_?";
- u32 addr, offset = 0;
- u8 storm_id, set_id;
+#define VFC_STATUS_RESP_READY_BIT 0
+#define VFC_STATUS_BUSY_BIT 1
+#define VFC_STATUS_SENDING_CMD_BIT 2
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
+#define VFC_POLLING_DELAY_MS 1
+#define VFC_POLLING_COUNT 20
- if (!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id))
- continue;
+/* Reads data from VFC. Returns the number of dwords read (0 on error).
+ * Sizes are specified in dwords.
+ */
+static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct storm_defs *storm,
+ u32 *cmd_data,
+ u32 cmd_size,
+ u32 *addr_data,
+ u32 addr_size,
+ u32 resp_size, u32 *dump_buf)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 vfc_status, polling_ms, polling_count = 0, i;
+ u32 reg_addr, sem_base;
+ bool is_ready = false;
+
+ sem_base = storm->sem_fast_mem_addr;
+ polling_ms = VFC_POLLING_DELAY_MS *
+ s_hw_type_defs[dev_data->hw_type].delay_factor;
+
+ /* Write VFC command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_DATA_WR,
+ cmd_data, cmd_size);
+
+ /* Write VFC address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_ADDR,
+ addr_data, addr_size);
+
+ /* Read response */
+ for (i = 0; i < resp_size; i++) {
+ /* Poll until ready */
+ do {
+ reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ &vfc_status,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+ is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
+
+ if (!is_ready) {
+ if (polling_count++ == VFC_POLLING_COUNT)
+ return 0;
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE) +
- IOR_SET_OFFSET(set_id);
- if (strlen(buf) > 0)
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- false,
- 32,
- false,
- "ior",
- true,
- storm->letter);
- }
+ msleep(polling_ms);
+ }
+ } while (!is_ready);
+
+ reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + i,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1, false, SPLIT_TYPE_NONE, 0);
}
- return offset;
+ return resp_size;
}
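
On timing: polling_ms scales the 1 ms base delay by the platform's delay_factor, and the loop gives up after VFC_POLLING_COUNT failed polls. As a back-of-the-envelope figure:

	/* Assuming delay_factor == 1 on ASIC:
	 * VFC_POLLING_COUNT * VFC_POLLING_DELAY_MS = 20 * 1 ms = ~20 ms
	 * per response dword before the helper returns 0, which the GRC
	 * dump flow surfaces as DBG_STATUS_VFC_READ_ERROR.
	 */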
/* Dump VFC CAM. Returns the dumped size in dwords. */
@@ -3490,7 +2823,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3499,7 +2832,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
0,
total_size,
256,
- false, "vfc_cam", true, storm->letter);
+ false, "vfc_cam", storm->letter);
if (!dump)
return offset + total_size;
@@ -3507,26 +2840,18 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
/* Prepare CAM address */
SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
+ /* Read VFC CAM data */
+ for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
-
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
-
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ cam_cmd,
+ VFC_CAM_CMD_DWORDS,
+ cam_addr,
+ VFC_CAM_ADDR_DWORDS,
+ VFC_CAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3543,7 +2868,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3554,35 +2879,27 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
256,
false,
ram_defs->type_name,
- true, storm->letter);
-
- /* Prepare RAM address */
- SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+ storm->letter);
if (!dump)
return offset + total_size;
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ /* Read VFC RAM data */
for (row = ram_defs->base_row;
- row < ram_defs->base_row + ram_defs->num_rows;
- row++, offset += VFC_RAM_RESP_DWORDS) {
- /* Write VFC RAM command */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- ram_cmd, VFC_RAM_CMD_DWORDS);
-
- /* Write VFC RAM address */
+ row < ram_defs->base_row + ram_defs->num_rows; row++) {
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- ram_addr, VFC_RAM_ADDR_DWORDS);
-
- /* Read VFC RAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ ram_cmd,
+ VFC_RAM_CMD_DWORDS,
+ ram_addr,
+ VFC_RAM_ADDR_DWORDS,
+ VFC_RAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3592,16 +2909,13 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 storm_id, i;
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id) ||
- !s_storm_defs[storm_id].has_vfc ||
- (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
- PLATFORM_ASIC))
+ !s_storm_defs[storm_id].has_vfc)
continue;
/* Read CAM */
@@ -3651,7 +2965,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
total_dwords,
rss_defs->entry_width,
packed,
- rss_defs->type_name, false, 0);
+ rss_defs->type_name, 0);
/* Dump RSS data */
if (!dump) {
@@ -3711,7 +3025,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
0,
ram_size,
block_size * 8,
- false, type_name, false, 0);
+ false, type_name, 0);
/* Read and dump Big RAM data */
if (!dump)
@@ -3737,6 +3051,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps MCP scratchpad. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3758,8 +3073,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE_BB_K2,
- false, 0, false, "MCP", false, 0);
+ MCP_REG_SCRATCH_SIZE,
+ false, 0, false, "MCP", 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3769,19 +3084,19 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- false, 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
offset += qed_grc_dump_registers(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, block_enable, "block", "MCP");
+ dump, block_enable, "MCP");
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, SPLIT_TYPE_NONE, 0,
- "block", "MCP");
+ "MCP");
addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -3798,7 +3113,9 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Dumps the tbus indirect memory for all PHYs. */
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3832,7 +3149,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
mem_name,
0,
PHY_DUMP_SIZE_DWORDS,
- 16, true, mem_name, false, 0);
+ 16, true, mem_name, 0);
if (!dump) {
offset += PHY_DUMP_SIZE_DWORDS;
@@ -3863,21 +3180,58 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block_id,
- u8 line_id,
- u8 enable_mask,
- u8 right_shift,
- u8 force_valid_mask, u8 force_frame_mask)
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes);
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf);
+
+/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
{
- struct block_defs *block = s_block_defs[block_id];
+ u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
+ u32 hw_dump_size_dwords = 0, offset = 0;
+ enum dbg_status status;
- qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
+ /* Read HW dump image from NVRAM */
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ NVM_TYPE_HW_DUMP_OUT,
+ &hw_dump_offset_bytes,
+ &hw_dump_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return 0;
+
+ hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
+
+ /* Dump HW dump image section */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_hw_dump", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", hw_dump_size_dwords);
+
+ /* Read MCP HW dump image into dump buffer */
+ if (dump && hw_dump_size_dwords) {
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ hw_dump_offset_bytes,
+ hw_dump_size_bytes, dump_buf + offset);
+ if (status != DBG_STATUS_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to read MCP HW Dump image from NVRAM\n");
+ return 0;
+ }
+ }
+ offset += hw_dump_size_dwords;
+
+ return offset;
}
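
Like the rest of the dump helpers in this file, qed_grc_dump_mcp_hw_dump() supports a dry run: with dump set to false, qed_dump_section_hdr()/qed_dump_num_param() only account for size, so callers can size the buffer first. A minimal caller sketch (illustrative; vzalloc() and the error handling are assumptions, not patch content):

	u32 size_dw = qed_grc_dump_mcp_hw_dump(p_hwfn, p_ptt, NULL, false);
	u32 *buf = vzalloc(size_dw * sizeof(u32));

	if (buf)
		qed_grc_dump_mcp_hw_dump(p_hwfn, p_ptt, buf, true);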
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3886,26 +3240,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, line_id, offset = 0;
+ u32 block_id, line_id, offset = 0, addr, len;
/* Don't dump static debug if a debug bus recording is in progress */
if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- /* Disable all blocks debug output */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
-
- if (block->dbg_client_id[dev_data->chip_id] !=
- MAX_DBG_BUS_CLIENTS)
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
- 0);
- }
+ /* Disable debug bus in all blocks */
+ qed_bus_disable_blocks(p_hwfn, p_ptt);
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
- qed_bus_set_framing_mode(p_hwfn,
- p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
qed_wr(p_hwfn,
p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
@@ -3914,28 +3261,48 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
- struct dbg_bus_block *block_desc;
- u32 block_dwords, addr, len;
- u8 dbg_client_id;
+ const struct dbg_block_chip *block_per_chip;
+ const struct dbg_block *block;
+ bool is_removed, has_dbg_bus;
+ u16 modes_buf_offset;
+ u32 block_dwords;
+
+ block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
+ is_removed = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED);
+ has_dbg_bus = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS);
- if (block->dbg_client_id[dev_data->chip_id] ==
- MAX_DBG_BUS_CLIENTS)
+		/* Read+clear for NWS parity is not working, so skip the NWS block */
+ if (block_id == BLOCK_NWS)
continue;
- block_desc = get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
- block_dwords = NUM_DBG_LINES(block_desc) *
+ if (!is_removed && has_dbg_bus &&
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0) {
+ modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ has_dbg_bus = false;
+ }
+
+ if (is_removed || !has_dbg_bus)
+ continue;
+
+ block_dwords = NUM_DBG_LINES(block_per_chip) *
STATIC_DEBUG_LINE_DWORDS;
/* Dump static section params */
+ block = get_dbg_block(p_hwfn, (enum block_id)block_id);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
block->name,
0,
block_dwords,
- 32, false, "STATIC", false, 0);
+ 32, false, "STATIC", 0);
if (!dump) {
offset += block_dwords;
@@ -3951,20 +3318,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
}
/* Enable block's client */
- dbg_client_id = block->dbg_client_id[dev_data->chip_id];
qed_bus_enable_clients(p_hwfn,
p_ptt,
- BIT(dbg_client_id));
+ BIT(block_per_chip->dbg_client_id));
addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
len = STATIC_DEBUG_LINE_DWORDS;
- for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
line_id++) {
/* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id, 0xf, 0, 0, 0);
+ qed_bus_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
/* Read debug line info */
offset += qed_grc_dump_addr_range(p_hwfn,
@@ -3979,7 +3345,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Disable block's client and debug output */
qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
+ qed_bus_config_dbg_line(p_hwfn, p_ptt,
+ (enum block_id)block_id, 0, 0, 0, 0, 0);
}
if (dump) {
@@ -3999,8 +3366,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 dwords_read, offset = 0;
bool parities_masked = false;
- u32 offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -4019,13 +3386,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-lcids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS));
+ NUM_OF_LCIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-ltids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS));
+ NUM_OF_LTIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump, "num-ports", dev_data->num_ports);
@@ -4037,7 +3402,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
/* Take all blocks out of reset (using reset registers) */
if (dump) {
- qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
}
@@ -4080,7 +3445,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf +
offset,
dump,
- block_enable, NULL, NULL);
+ block_enable, NULL);
/* Dump special registers */
offset += qed_grc_dump_special_regs(p_hwfn,
@@ -4114,23 +3479,29 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump, i);
- /* Dump IORs */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
- offset += qed_grc_dump_iors(p_hwfn,
- p_ptt, dump_buf + offset, dump);
-
/* Dump VFC */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
- offset += qed_grc_dump_vfc(p_hwfn,
- p_ptt, dump_buf + offset, dump);
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
+ dwords_read = qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += dwords_read;
+ if (!dwords_read)
+ return DBG_STATUS_VFC_READ_ERROR;
+ }
/* Dump PHY tbus */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
- CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
offset += qed_grc_dump_phy(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ /* Dump MCP HW Dump */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
+	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
+ offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
/* Dump static debug data (only if not during debug bus recording) */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_STATIC) &&
@@ -4181,8 +3552,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
u8 reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
info_regs = &regs[rule->num_cond_regs].info_reg;
@@ -4202,8 +3574,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
struct dbg_idle_chk_result_reg_hdr *reg_hdr;
- reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
/* Write register header */
if (!dump) {
@@ -4320,12 +3692,13 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
const u32 *imm_values;
rule = &input_rules[i];
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
- [rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
- imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
- [rule->imm_offset];
+ imm_values =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
+ rule->imm_offset;
/* Check if all condition register blocks are out of reset, and
* find maximal number of entries (all condition registers that
@@ -4443,10 +3816,12 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 num_failing_rules_offset, offset = 0, input_offset = 0;
- u32 num_failing_rules = 0;
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
+ u32 num_failing_rules_offset, offset = 0,
+ input_offset = 0, num_failing_rules = 0;
- /* Dump global params */
+	/* Dump global params - the "1" must match the param count below */
offset += qed_dump_common_global_params(p_hwfn,
p_ptt,
dump_buf + offset, dump, 1);
@@ -4458,12 +3833,10 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
- (const struct dbg_idle_chk_cond_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
- [input_offset++];
+ (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
+ input_offset++;
bool eval_mode, mode_match = true;
u32 curr_failing_rules;
u16 modes_buf_offset;
@@ -4480,16 +3853,21 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
+ const struct dbg_idle_chk_rule *rule =
+ (const struct dbg_idle_chk_rule *)((u32 *)
+ dbg_buf->ptr
+ + input_offset);
+ u32 num_input_rules =
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- (const struct dbg_idle_chk_rule *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
- ptr[input_offset],
- cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
- &curr_failing_rules);
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ rule,
+ num_input_rules,
+ &curr_failing_rules);
num_failing_rules += curr_failing_rules;
}
@@ -4556,7 +3934,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
{
u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0;
+ u32 read_offset = 0, param = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
@@ -4569,14 +3947,14 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
/* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM,
- (nvram_offset_bytes +
- read_offset) |
- (bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_OFFSET),
- &ret_mcp_resp, &ret_mcp_param,
- &ret_read_size,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
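
The rewritten mailbox call packs both fields into a single 32-bit parameter via SET_MFW_FIELD instead of the old open-coded or/shift. For example, a 32-byte chunk at NVRAM offset 0x1000 would be encoded as (sketch):

	u32 param = 0;

	SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_OFFSET, 0x1000);
	SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, 32);
	/* param now carries both fields for DRV_MSG_CODE_NVM_READ_NVRAM */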
@@ -4714,12 +4092,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
- bool mcp_access;
int halted = 0;
+ bool use_mfw;
*num_dumped_dwords = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+ use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
@@ -4740,7 +4118,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
	 * consistent. If halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump && mcp_access) {
+ if (dump && use_mfw) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4780,17 +4158,15 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
*/
trace_meta_size_bytes =
qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
- if ((!trace_meta_size_bytes || dump) && mcp_access) {
+ if ((!trace_meta_size_bytes || dump) && use_mfw)
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
trace_data_size_bytes,
&running_bundle_id,
&trace_meta_offset_bytes,
&trace_meta_size_bytes);
- if (status == DBG_STATUS_OK)
- trace_meta_size_dwords =
- BYTES_TO_DWORDS(trace_meta_size_bytes);
- }
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
/* Dump trace meta size param */
offset += qed_dump_num_param(dump_buf + offset,
@@ -4814,7 +4190,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* If no mcp access, indicate that the dump doesn't contain the meta
* data from NVRAM.
*/
- return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
/* Dump GRC FIFO */
@@ -4992,16 +4368,18 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
override_window_dwords =
qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- true,
- addr,
- override_window_dwords,
- true, SPLIT_TYPE_NONE, 0);
- qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
- override_window_dwords);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true, SPLIT_TYPE_NONE, 0);
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+ }
out:
/* Dump last section */
offset += qed_dump_last_section(dump_buf, offset, dump);
@@ -5037,7 +4415,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5088,20 +4466,362 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps the specified ILT pages to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
+ bool dump,
+ u32 start_page_id,
+ u32 num_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ u32 page_id, end_page_id, offset = 0;
+
+ if (num_pages == 0)
+ return offset;
+
+ end_page_id = start_page_id + num_pages - 1;
+
+ for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
+ struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
+
+		/* Disabled sanity guard, kept for reference:
+		 * if (page_id >= p_hwfn->p_cxt_mngr->ilt_shadow_size)
+		 *	break;
+		 */
+
+ if (!ilt_pages[page_id].virt_addr)
+ continue;
+
+ if (dump_page_ids) {
+ /* Copy page ID to dump buffer */
+ if (dump)
+ *(dump_buf + offset) = page_id;
+ offset++;
+ } else {
+ /* Copy page memory to dump buffer */
+ if (dump)
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr, mem_desc->size);
+ offset += BYTES_TO_DWORDS(mem_desc->size);
+ }
+ }
+
+ return offset;
+}
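
Size accounting in the helper above, spelled out: in page-ID mode every mapped page contributes exactly one dword, in page-memory mode it contributes BYTES_TO_DWORDS(mem_desc->size) dwords, and pages with a NULL virt_addr contribute nothing in either mode. For example:

	/* 3 mapped pages of 4 KB each:
	 *   dump_page_ids == true  -> 3 dwords (the page IDs)
	 *   dump_page_ids == false -> 3 * BYTES_TO_DWORDS(4096) = 3072 dwords
	 */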
+
+/* Dumps a section containing the dumped ILT pages.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ u32 valid_conn_pf_pages,
+ u32 valid_conn_vf_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 pf_start_line, start_page_id, offset = 0;
+ u32 cdut_pf_init_pages, cdut_vf_init_pages;
+ u32 cdut_pf_work_pages, cdut_vf_work_pages;
+ u32 base_data_offset, size_param_offset;
+ u32 cdut_pf_pages, cdut_vf_pages;
+ const char *section_name;
+ u8 i;
+
+ section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
+ cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
+ cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
+ cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
+ cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
+ cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
+ cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
+ pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+
+ offset +=
+ qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+
+ /* Dump size parameter (0 for now, overwritten with real size later) */
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ base_data_offset = offset;
+
+ /* CDUC pages are ordered as follows:
+ * - PF pages - valid section (included in PF connection type mapping)
+ * - PF pages - invalid section (not dumped)
+ * - For each VF in the PF:
+ * - VF pages - valid section (included in VF connection type mapping)
+ * - VF pages - invalid section (not dumped)
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
+ /* Dump connection PF pages */
+ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_pf_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump connection VF pages */
+ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* CDUT pages are ordered as follows:
+ * - PF init pages (not dumped)
+ * - PF work pages
+ * - For each VF in the PF:
+ * - VF init pages (not dumped)
+ * - VF work pages
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
+ /* Dump task PF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_init_pages - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_pf_work_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump task VF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += cdut_vf_pages)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_vf_work_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* Overwrite size param */
+ if (dump)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ dump, "size", offset - base_data_offset);
+
+ return offset;
+}
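
To make the CDUT arithmetic concrete, take assumed counts of cdut_pf_init_pages = 2 and cdut_vf_init_pages = 1:

	/* PF work pages start right after the undumped PF init pages: */
	start_page_id = clients[ILT_CLI_CDUT].first.val + 2 - pf_start_line;

	/* The first VF's work pages skip its single init page, and each
	 * subsequent VF advances by cdut_vf_pages (init + work pages).
	 */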
+
+/* Performs ILT Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
+ u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ struct phys_mem_desc *ilt_pages;
+ u8 conn_type;
+
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+
+	/* Dump global params - the "22" must match the param count below */
+ offset += qed_dump_common_global_params(p_hwfn, p_ptt,
+ dump_buf + offset, dump, 22);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "ilt-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-page-size", cduc_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-first-page-id",
+ clients[ILT_CLI_CDUC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-last-page-id",
+ clients[ILT_CLI_CDUC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-pf-pages",
+				     clients[ILT_CLI_CDUC].pf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-vf-pages",
+				     clients[ILT_CLI_CDUC].vf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-conn-ctx-size",
+ conn_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-page-size", cdut_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-first-page-id",
+ clients[ILT_CLI_CDUT].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-last-page-id",
+ clients[ILT_CLI_CDUT].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-init-pages",
+ qed_get_cdut_num_pf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-init-pages",
+ qed_get_cdut_num_vf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-work-pages",
+ qed_get_cdut_num_pf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-work-pages",
+ qed_get_cdut_num_vf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-task-ctx-size",
+ p_hwfn->p_cxt_mngr->task_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "task-type-id",
+ p_hwfn->p_cxt_mngr->task_type_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "first-vf-id-in-pf",
+ p_hwfn->p_cxt_mngr->first_vf_in_pf);
+ offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes", sizeof(void *));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "pf-start-line",
+ p_hwfn->p_cxt_mngr->pf_start_line);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "page-mem-desc-size-dwords",
+ PAGE_MEM_DESC_SIZE_DWORDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ilt-shadow-size",
+ p_hwfn->p_cxt_mngr->ilt_shadow_size);
+	/* When adding or removing parameters, update the count passed to
+	 * qed_dump_common_global_params() above.
+	 */
+
+ /* Dump section containing number of PF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_pf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_pf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
+
+ if (dump)
+ *(dump_buf + offset) = num_pf_cids;
+ valid_conn_pf_cids += num_pf_cids;
+ }
+
+ /* Dump section containing number of VF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_vf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_vf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
+
+ if (dump)
+ *(dump_buf + offset) = num_vf_cids;
+ valid_conn_vf_cids += num_vf_cids;
+ }
+
+ /* Dump section containing physical memory descs for each ILT page */
+ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "ilt_page_desc", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+
+ /* Copy memory descriptors to dump buffer */
+ if (dump) {
+ u32 page_id;
+
+ for (page_id = 0; page_id < num_pages;
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
+ }
+
+ valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
+ num_cids_per_page);
+ valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
+ num_cids_per_page);
+
+ /* Dump ILT pages IDs */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, true);
+
+ /* Dump ILT pages memory */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, false);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ return offset;
+}
+
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- /* convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
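
qed_dbg_set_bin_ptr() now records each buffer per hwfn through qed_set_dbg_bin_buf() instead of filling a shared static array, but the binary layout it walks is unchanged: the blob starts with an array of bin_buffer_hdr entries whose offset/length fields point back into the same blob. Roughly (sketch of the assumed layout):

	/* bin_ptr --> +---------------------------------+
	 *             | bin_buffer_hdr[0] {offset, len} |
	 *             | bin_buffer_hdr[1] {offset, len} |
	 *             | ...                             |
	 *             +---------------------------------+
	 * bin_ptr + hdr[0].offset --> buffer 0 data (hdr[0].length bytes)
	 * bin_ptr + hdr[1].offset --> buffer 1 data ...
	 */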
@@ -5116,7 +4836,7 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
/* Skip Storm if it's in reset */
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5129,16 +4849,17 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
}
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum dbg_status status;
int i;
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
"dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
@@ -5164,24 +4885,23 @@ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
/* Update all params with the preset values */
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ struct grc_param_defs *defs = &s_grc_param_defs[i];
u32 preset_val;
-
/* Skip persistent params */
- if (s_grc_param_defs[i].is_persistent)
+ if (defs->is_persistent)
continue;
/* Find preset value */
if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
preset_val =
- s_grc_param_defs[i].exclude_all_preset_val;
+ defs->exclude_all_preset_val;
else if (grc_param == DBG_GRC_PARAM_CRASH)
preset_val =
- s_grc_param_defs[i].crash_preset_val;
+ defs->crash_preset_val[dev_data->chip_id];
else
return DBG_STATUS_INVALID_ARGS;
- qed_grc_set_param(p_hwfn,
- (enum dbg_grc_params)i, preset_val);
+ qed_grc_set_param(p_hwfn, i, preset_val);
}
} else {
/* Regular param - set its value */
@@ -5207,18 +4927,18 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
@@ -5258,20 +4978,19 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
u32 *buf_size)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct idle_chk_data *idle_chk;
+ struct idle_chk_data *idle_chk = &dev_data->idle_chk;
enum dbg_status status;
- idle_chk = &dev_data->idle_chk;
*buf_size = 0;
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
if (!idle_chk->buf_size_set) {
@@ -5306,6 +5025,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
/* Update reset state */
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Idle Check Dump */
@@ -5321,7 +5041,7 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5368,7 +5088,7 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5414,7 +5134,7 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5460,7 +5180,7 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5510,7 +5230,7 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5554,6 +5274,50 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+
+	/* Revert GRC params to their defaults */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum block_id block_id,
@@ -5561,19 +5325,20 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
bool clear_status,
struct dbg_attn_block_result *results)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
u8 reg_idx, num_attn_regs, num_result_regs = 0;
const struct dbg_attn_reg *attn_reg_arr;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- attn_reg_arr = qed_get_block_attn_regs(block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ block_id,
attn_type, &num_attn_regs);
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
@@ -5618,7 +5383,7 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
results->block_id = (u8)block_id;
results->names_offset =
- qed_get_block_attn_data(block_id, attn_type)->names_offset;
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
SET_FIELD(results->data,
DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
@@ -5628,11 +5393,6 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
/******************************* Data Types **********************************/
-struct block_info {
- const char *name;
- enum block_id id;
-};
-
/* REG fifo element */
struct reg_fifo_element {
u64 data;
@@ -5656,6 +5416,12 @@ struct reg_fifo_element {
#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
};
+/* REG fifo error element */
+struct reg_fifo_err {
+ u32 err_code;
+ const char *err_msg;
+};
+
/* IGU fifo element */
struct igu_fifo_element {
u32 dword0;
@@ -5755,20 +5521,6 @@ struct igu_fifo_addr_data {
enum igu_fifo_addr_types type;
};
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
@@ -5776,7 +5528,7 @@ struct dbg_tools_user_data {
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
- (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+ (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
@@ -5785,107 +5537,6 @@ struct dbg_tools_user_data {
/***************************** Constant Arrays *******************************/
-struct user_dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct user_dbg_array
-s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
-/* Block names array */
-static struct block_info s_block_info_arr[] = {
- {"grc", BLOCK_GRC},
- {"miscs", BLOCK_MISCS},
- {"misc", BLOCK_MISC},
- {"dbu", BLOCK_DBU},
- {"pglue_b", BLOCK_PGLUE_B},
- {"cnig", BLOCK_CNIG},
- {"cpmu", BLOCK_CPMU},
- {"ncsi", BLOCK_NCSI},
- {"opte", BLOCK_OPTE},
- {"bmb", BLOCK_BMB},
- {"pcie", BLOCK_PCIE},
- {"mcp", BLOCK_MCP},
- {"mcp2", BLOCK_MCP2},
- {"pswhst", BLOCK_PSWHST},
- {"pswhst2", BLOCK_PSWHST2},
- {"pswrd", BLOCK_PSWRD},
- {"pswrd2", BLOCK_PSWRD2},
- {"pswwr", BLOCK_PSWWR},
- {"pswwr2", BLOCK_PSWWR2},
- {"pswrq", BLOCK_PSWRQ},
- {"pswrq2", BLOCK_PSWRQ2},
- {"pglcs", BLOCK_PGLCS},
- {"ptu", BLOCK_PTU},
- {"dmae", BLOCK_DMAE},
- {"tcm", BLOCK_TCM},
- {"mcm", BLOCK_MCM},
- {"ucm", BLOCK_UCM},
- {"xcm", BLOCK_XCM},
- {"ycm", BLOCK_YCM},
- {"pcm", BLOCK_PCM},
- {"qm", BLOCK_QM},
- {"tm", BLOCK_TM},
- {"dorq", BLOCK_DORQ},
- {"brb", BLOCK_BRB},
- {"src", BLOCK_SRC},
- {"prs", BLOCK_PRS},
- {"tsdm", BLOCK_TSDM},
- {"msdm", BLOCK_MSDM},
- {"usdm", BLOCK_USDM},
- {"xsdm", BLOCK_XSDM},
- {"ysdm", BLOCK_YSDM},
- {"psdm", BLOCK_PSDM},
- {"tsem", BLOCK_TSEM},
- {"msem", BLOCK_MSEM},
- {"usem", BLOCK_USEM},
- {"xsem", BLOCK_XSEM},
- {"ysem", BLOCK_YSEM},
- {"psem", BLOCK_PSEM},
- {"rss", BLOCK_RSS},
- {"tmld", BLOCK_TMLD},
- {"muld", BLOCK_MULD},
- {"yuld", BLOCK_YULD},
- {"xyld", BLOCK_XYLD},
- {"ptld", BLOCK_PTLD},
- {"ypld", BLOCK_YPLD},
- {"prm", BLOCK_PRM},
- {"pbf_pb1", BLOCK_PBF_PB1},
- {"pbf_pb2", BLOCK_PBF_PB2},
- {"rpb", BLOCK_RPB},
- {"btb", BLOCK_BTB},
- {"pbf", BLOCK_PBF},
- {"rdif", BLOCK_RDIF},
- {"tdif", BLOCK_TDIF},
- {"cdu", BLOCK_CDU},
- {"ccfc", BLOCK_CCFC},
- {"tcfc", BLOCK_TCFC},
- {"igu", BLOCK_IGU},
- {"cau", BLOCK_CAU},
- {"rgfs", BLOCK_RGFS},
- {"rgsrc", BLOCK_RGSRC},
- {"tgfs", BLOCK_TGFS},
- {"tgsrc", BLOCK_TGSRC},
- {"umac", BLOCK_UMAC},
- {"xmac", BLOCK_XMAC},
- {"dbg", BLOCK_DBG},
- {"nig", BLOCK_NIG},
- {"wol", BLOCK_WOL},
- {"bmbn", BLOCK_BMBN},
- {"ipc", BLOCK_IPC},
- {"nwm", BLOCK_NWM},
- {"nws", BLOCK_NWS},
- {"ms", BLOCK_MS},
- {"phy_pcie", BLOCK_PHY_PCIE},
- {"led", BLOCK_LED},
- {"avs_wrap", BLOCK_AVS_WRAP},
- {"pxpreqbus", BLOCK_PXPREQBUS},
- {"misc_aeu", BLOCK_MISC_AEU},
- {"bar0_map", BLOCK_BAR0_MAP}
-};
-
/* Status string array */
static const char * const s_status_str[] = {
/* DBG_STATUS_OK */
@@ -5915,14 +5566,12 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
- /* DBG_STATUS_TOO_MANY_INPUTS */
- "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
+ "The filter/trigger constraint dword offsets are not enabled for recording",
- /* DBG_STATUS_INPUT_OVERLAP */
- "Overlapping debug bus inputs",
- /* DBG_STATUS_HW_ONLY_RECORDING */
- "Cannot record Storm data since the entire recording cycle is used by HW",
+ /* DBG_STATUS_VFC_READ_ERROR */
+ "Error reading from VFC",
/* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
@@ -5939,8 +5588,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
- /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
- "Filters and triggers are not allowed when recording in 64b units",
+ /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
+ "Filters and triggers are not allowed in E4 256-bit mode",
/* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
@@ -6014,8 +5663,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_RESERVED2 */
- "Reserved debug status - shouldn't be returned",
+ /* DBG_STATUS_RESERVED0 */
+ "",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6038,17 +5687,32 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- /* DBG_STATUS_FILTER_BUG */
- "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+ /* DBG_STATUS_RESERVED1 */
+ "",
/* DBG_STATUS_NON_MATCHING_LINES */
- "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+ "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
- /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
- "The selected trigger dword offset wasn't enabled in the recorded HW block",
+ /* DBG_STATUS_INSUFFICIENT_HW_IDS */
+	"Insufficient HW IDs. Try to record fewer Storms/blocks",
/* DBG_STATUS_DBG_BUS_IN_USE */
- "The debug bus is in use"
+ "The debug bus is in use",
+
+ /* DBG_STATUS_INVALID_STORM_DBG_MODE */
+	"The storm debug mode is not supported on the current chip",
+
+ /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
+ "Other engine is supported only in BB",
+
+ /* DBG_STATUS_FILTER_SINGLE_HW_ID */
+ "The configured filter mode requires a single Storm/block input",
+
+ /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
+	"The configured filter mode requires that all the constraints of a single trigger state be defined on a single Storm/block input",
+
+ /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
+ "When triggering on Storm data, the Storm to trigger on must be specified"
};
/* Idle check severity names array */
@@ -6104,7 +5768,7 @@ static const char * const s_master_strs[] = {
"xsdm",
"dbu",
"dmae",
- "???",
+ "jdap",
"???",
"???",
"???",
@@ -6112,12 +5776,13 @@ static const char * const s_master_strs[] = {
};
/* REG FIFO error messages array */
-static const char * const s_reg_fifo_error_strs[] = {
- "grc timeout",
- "address doesn't belong to any block",
- "reserved address in block or write to read-only address",
- "privilege/protection mismatch",
- "path isolation error"
+static struct reg_fifo_err s_reg_fifo_errors[] = {
+ {1, "grc timeout"},
+ {2, "address doesn't belong to any block"},
+ {4, "reserved address in block or write to read-only address"},
+ {8, "privilege/protection mismatch"},
+ {16, "path isolation error"},
+ {17, "RSL error"}
};
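
The replacement table pairs each error with an explicit code instead of indexing messages by bit position, since the codes are no longer strictly one-hot (note entry 17, "RSL error"). A minimal sketch of the struct and lookup this implies, matching the two-field initializers above and the decode loop further below:

struct reg_fifo_err {
	u8 err_code;
	const char *err_msg;
};

/* Return the message for a REG FIFO error code, or NULL if unknown. */
static const char *qed_reg_fifo_err_msg(u8 err_code)
{
	u8 j;

	for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors); j++)
		if (err_code == s_reg_fifo_errors[j].err_code)
			return s_reg_fifo_errors[j].err_msg;

	return NULL;
}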
/* IGU FIFO sources array */
@@ -6357,8 +6022,21 @@ static u32 qed_print_section_params(u32 *dump_buf,
return dump_offset;
}
-static struct dbg_tools_user_data *
-qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
+/* Returns the block name that matches the specified block ID,
+ * or NULL if not found.
+ */
+static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block_user *block =
+ (const struct dbg_block_user *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
+
+ return (const char *)block->name;
+}
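
The cast binds before the pointer addition, so the pointer advances in sizeof(struct dbg_block_user) strides rather than bytes. An equivalent spelling of the body, using the dbg_block_user definition from the qed_hsi.h hunk below:

	const struct dbg_block_user *blocks =
		(const struct dbg_block_user *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr;

	return (const char *)blocks[block_id].name;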
+
+static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
+ *p_hwfn)
{
return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
}
@@ -6366,7 +6044,8 @@ qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6394,19 +6073,18 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
- (const struct dbg_idle_chk_rule_parsing_data *)
- &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
- ptr[hdr->rule_id];
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
+ hdr->rule_id;
parsing_str_offset =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
has_fw_msg =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str =
- &((const char *)
- s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = (const char *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
+ parsing_str_offset;
lsi_msg = parsing_str;
curr_reg_id = 0;
@@ -6510,7 +6188,8 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6528,8 +6207,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
*num_errors = 0;
*num_warnings = 0;
- if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -6562,7 +6241,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
true,
@@ -6582,7 +6262,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
false,
@@ -6694,9 +6375,8 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
&offset);
- format_len =
- (format_ptr->data &
- MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_len = GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEN);
format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
if (!format_ptr->format_str) {
/* Update number of modules to be released */
@@ -6719,7 +6399,7 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
* trace_buf - MCP trace cyclic buffer
* trace_buf_size - MCP trace cyclic buffer size in bytes
* data_offset - offset in bytes of the data to parse in the MCP trace cyclic
- * buffer.
+ * buffer.
* data_size - size in bytes of data to parse.
* parsed_buf - destination buffer for parsed data.
* parsed_results_bytes - size of parsed data in bytes.
@@ -6764,9 +6444,8 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
/* Skip message if its index doesn't exist in the meta data */
if (format_idx >= meta->formats_num) {
- u8 format_size =
- (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
- MFW_TRACE_PRM_SIZE_SHIFT);
+ u8 format_size = (u8)GET_MFW_FIELD(header,
+ MFW_TRACE_PRM_SIZE);
if (data_size < format_size)
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6781,11 +6460,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
format_ptr = &meta->formats[format_idx];
for (i = 0,
- param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
- param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
i < MCP_TRACE_FORMAT_MAX_PARAMS;
- i++,
- param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
/* Extract param size (0..3) */
u8 param_size = (u8)((format_ptr->data & param_mask) >>
@@ -6813,12 +6491,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
data_size -= param_size;
}
- format_level = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
- format_module = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_MODULE_MASK) >>
- MCP_TRACE_FORMAT_MODULE_SHIFT);
+ format_level = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEVEL);
+ format_module = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_MODULE);
if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6960,7 +6636,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
const char *section_name, *param_name, *param_str_val;
u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
- u8 i, j, err_val, vf_val;
+ u8 i, j, err_code, vf_val;
u32 results_offset = 0;
char vf_str[4];
@@ -6991,7 +6667,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- bool err_printed = false;
+ const char *err_msg = NULL;
/* Discover if element belongs to a VF or a PF */
vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
@@ -7000,11 +6676,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
else
sprintf(vf_str, "%d", vf_val);
+ /* Find error message */
+ err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
+ for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
+ if (err_code == s_reg_fifo_errors[j].err_code)
+ err_msg = s_reg_fifo_errors[j].err_msg;
+
/* Add parsed element to parsed buffer */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
@@ -7021,30 +6703,8 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_MASTER)]);
-
- /* Print errors */
- for (j = 0,
- err_val = GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ERROR);
- j < ARRAY_SIZE(s_reg_fifo_error_strs);
- j++, err_val >>= 1) {
- if (err_val & 0x1) {
- if (err_printed)
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf,
- results_offset), ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf, results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
- }
- }
-
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ REG_FIFO_ELEMENT_MASTER)],
+ err_msg ? err_msg : "unknown error code");
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -7398,27 +7058,28 @@ static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
/* Convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_user_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_user_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ (enum bin_dbg_buffer_type)buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
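
The bin file begins with one bin_buffer_hdr per buffer type, giving each buffer's byte offset and length within the file; the loop hands every region to qed_set_dbg_bin_buf(), which presumably records the pointer and dword size in p_hwfn->dbg_arrays. The assumed header layout (a sketch; the real definition lives in the shared HSI headers):

struct bin_buffer_hdr {
	u32 offset;	/* byte offset of the buffer within the bin file */
	u32 length;	/* buffer length in bytes */
};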
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr)
{
- p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
- GFP_KERNEL);
- if (!p_hwfn->dbg_user_info)
+ *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
+ GFP_KERNEL);
+ if (!(*user_data_ptr))
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
return DBG_STATUS_OK;
@@ -7437,7 +7098,8 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7453,7 +7115,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7624,25 +7287,28 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results)
{
- struct user_dbg_array *block_attn, *pstrings;
const u32 *block_attn_name_offsets;
- enum dbg_attn_type attn_type;
+ const char *attn_name_base;
const char *block_name;
+ enum dbg_attn_type attn_type;
u8 num_regs, i, j;
num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
- attn_type = (enum dbg_attn_type)
- GET_FIELD(results->data,
- DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
- block_name = s_block_info_arr[results->block_id].name;
-
- if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
+ if (!block_name)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
- block_attn_name_offsets = &block_attn->ptr[results->names_offset];
+ block_attn_name_offsets =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
+ results->names_offset;
+
+ attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
@@ -7653,18 +7319,17 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- bit_mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
-
- pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
+ bit_mapping = (struct dbg_attn_bit_mapping *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
+ reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++) {
+ for (j = 0; j < num_reg_attn; j++, bit_idx++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 attn_name_offset, sts_addr;
+ u32 attn_name_offset;
+ u32 sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
@@ -7676,18 +7341,19 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx))) {
- bit_idx++;
+ if (!(reg_result->sts_val & BIT(bit_idx)))
continue;
- }
- /* Find attention name */
+ /* An attention bit with value=1 was found
+ * Find attention name
+ */
attn_name_offset =
block_attn_name_offsets[attn_idx_val];
- attn_name = &((const char *)
- pstrings->ptr)[attn_name_offset];
- attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
- "Interrupt" : "Parity";
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
masked_str = reg_result->mask_val & BIT(bit_idx) ?
" [masked]" : "";
sts_addr = GET_FIELD(reg_result->data,
@@ -7695,15 +7361,15 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%s (%s) : %s [address 0x%08x, bit %d]%s\n",
block_name, attn_type_str, attn_name,
- sts_addr, bit_idx, masked_str);
-
- bit_idx++;
+ sts_addr * 4, bit_idx, masked_str);
}
}
return DBG_STATUS_OK;
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7763,7 +7429,10 @@ static struct {
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size},};
+ qed_get_fw_asserts_results_buf_size}, {
+ "ilt",
+ qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL},};
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7846,6 +7515,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
return rc;
}
+#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
+
/* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -7875,6 +7546,17 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
&buf_size_dwords);
if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
+
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
+ feature->buf_size = 0;
+ DP_NOTICE(p_hwfn->cdev,
+ "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
+ qed_features_lookup[feature_idx].name,
+ buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
+
+ return DBG_STATUS_OK;
+ }
+
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
if (!feature->dump_buf)
@@ -8021,6 +7703,16 @@ int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
+}
+
+int qed_dbg_ilt_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
+}
+
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes)
{
@@ -8037,9 +7729,17 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
* feature buffer.
*/
#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_SIZE_SHIFT 0
+#define REGDUMP_HEADER_SIZE_MASK 0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT 24
-#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_FEATURE_MASK 0x3f
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_ENGINE_MASK 0x1
+#define REGDUMP_MAX_SIZE 0x1000000
+#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
+
enum debug_print_features {
OLD_MODE = 0,
IDLE_CHK = 1,
@@ -8053,17 +7753,27 @@ enum debug_print_features {
NVM_CFG1 = 9,
DEFAULT_CFG = 10,
NVM_META = 11,
+ MDUMP = 12,
+ ILT_DUMP = 13,
};
-static u32 qed_calc_regdump_header(enum debug_print_features feature,
+static u32 qed_calc_regdump_header(struct qed_dev *cdev,
+ enum debug_print_features feature,
int engine, u32 feature_size, u8 omit_engine)
{
- /* Insert the engine, feature and mode inside the header and combine it
- * with feature size.
- */
- return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
- (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
- (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+ u32 res = 0;
+
+ SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
+ if (res != feature_size)
+ DP_NOTICE(cdev,
+ "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
+ feature, feature_size);
+
+ SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
+ SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
+
+ return res;
}
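
Per the field definitions above, the 32-bit header packs the size into bits 0-23, the feature into bits 24-29, and the omit-engine and engine flags into bits 30 and 31. A hypothetical reader-side helper (not part of the patch) that decodes a header with the complementary GET_FIELD accessor:

static void qed_parse_regdump_header(u32 hdr, u32 *feature_size,
				     u8 *feature, u8 *omit_engine, u8 *engine)
{
	*feature_size = GET_FIELD(hdr, REGDUMP_HEADER_SIZE);
	*feature = (u8)GET_FIELD(hdr, REGDUMP_HEADER_FEATURE);
	*omit_engine = (u8)GET_FIELD(hdr, REGDUMP_HEADER_OMIT_ENGINE);
	*engine = (u8)GET_FIELD(hdr, REGDUMP_HEADER_ENGINE);
}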
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
@@ -8079,9 +7789,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
- if (cdev->num_hwfns == 1)
+ if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ mutex_lock(&qed_dbg_lock);
+
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Collect idle_chks and grcDump for each hw function */
@@ -8094,7 +7806,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8106,7 +7818,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8118,7 +7830,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(REG_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8130,7 +7842,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8143,7 +7855,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
@@ -8158,25 +7870,45 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(FW_ASSERTS, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, FW_ASSERTS,
+ cur_engine, feature_size,
+ omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
rc);
}
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_val[i] = grc_params[i];
+ feature_size = qed_dbg_ilt_size(cdev);
+ if (!cdev->disable_ilt_dump &&
+ feature_size < ILT_DUMP_MAX_SIZE) {
+ rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, ILT_DUMP,
+ cur_engine,
+ feature_size,
+ omit_engine);
+ offset += feature_size + REGDUMP_HEADER_SIZE;
+ } else {
+ DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
+ rc);
+ }
+ }
/* GRC dump - must be last because when mcp stuck it will
* clutter idle_chk, reg_fifo, ...
*/
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ qed_calc_regdump_header(cdev, GRC_DUMP,
+ cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8185,12 +7917,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
qed_set_debug_engine(cdev, org_engine);
+
/* mcp_trace */
rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8199,11 +7932,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_CFG1);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_CFG1);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_CFG1, cur_engine,
+ qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8218,7 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
+ qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8234,8 +7968,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8243,6 +7977,23 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
}
+ /* nvm mdump */
+ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_MDUMP);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
+ }
+
+ mutex_unlock(&qed_dbg_lock);
+
return 0;
}
@@ -8250,9 +8001,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
- u32 regs_len = 0, image_len = 0;
+ u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
+ cdev->disable_ilt_dump = false;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Engine specific */
@@ -8267,6 +8019,12 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
REGDUMP_HEADER_SIZE +
qed_dbg_protection_override_size(cdev) +
REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+
+ ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
+ if (ilt_len < ILT_DUMP_MAX_SIZE) {
+ total_ilt_len += ilt_len;
+ regs_len += ilt_len;
+ }
}
qed_set_debug_engine(cdev, org_engine);
@@ -8282,6 +8040,17 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+
+ if (regs_len > REGDUMP_MAX_SIZE) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+			   "Dump exceeds max size 0x%x, disabling ILT dump\n",
+ REGDUMP_MAX_SIZE);
+ cdev->disable_ilt_dump = true;
+ regs_len -= total_ilt_len;
+ }
return regs_len;
}
@@ -8327,9 +8096,8 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8341,6 +8109,10 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
if (rc != DBG_STATUS_OK)
buf_size_dwords = 0;
+ /* Feature will not be dumped if it exceeds maximum size */
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
+ buf_size_dwords = 0;
+
qed_ptt_release(p_hwfn, p_ptt);
qed_feature->buf_size = buf_size_dwords * sizeof(u32);
return qed_feature->buf_size;
@@ -8360,14 +8132,21 @@ void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
void qed_dbg_pf_init(struct qed_dev *cdev)
{
- const u8 *dbg_values;
+ const u8 *dbg_values = NULL;
+ int i;
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
- qed_dbg_set_bin_ptr((u8 *)dbg_values);
- qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+
+ for_each_hwfn(cdev, i) {
+ qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ }
+
+	/* Set hwfn 0 as the default engine for debug */
+ cdev->dbg_params.engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
@@ -8375,11 +8154,11 @@ void qed_dbg_pf_exit(struct qed_dev *cdev)
struct qed_dbg_feature *feature = NULL;
enum qed_dbg_features feature_idx;
- /* Debug features' buffers may be allocated if debug feature was used
- * but dump wasn't called.
+ /* debug features' buffers may be allocated if debug feature was used
+ * but dump wasn't called
*/
for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
- feature = &cdev->dbg_params.features[feature_idx];
+ feature = &cdev->dbg_features[feature_idx];
if (feature->dump_buf) {
vfree(feature->dump_buf);
feature->dump_buf = NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e47e0e8d75b0..edf99d296bd1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -14,11 +14,13 @@ enum qed_dbg_features {
DBG_FEATURE_IGU_FIFO,
DBG_FEATURE_PROTECTION_OVERRIDE,
DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_ILT,
DBG_FEATURE_NUM
};
/* Forward Declaration */
struct qed_dev;
+struct qed_hwfn;
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a1ebc2b1ca0b..7912911337d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn,
/* Filter value */
addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
- params.flags = QED_DMAE_FLAG_PF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
params.dst_pfid = pfid;
rc = qed_dmae_host2grc(p_hwfn,
p_ptt,
@@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
+ qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
/* all vports participate in weighted fair queueing */
for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+ qm_info->qm_vport_params[i].wfq = 1;
}
/* initialize qm port params */
@@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
+ struct qed_dev *cdev = p_hwfn->cdev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
}
}
@@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
vport = &(qm_info->qm_vport_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
- qm_info->start_vport + i,
- vport->vport_rl, vport->vport_wfq);
+ "vport idx %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
pq = &(qm_info->qm_pq_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
qm_info->start_pq + i,
pq->port_id,
pq->vport_id,
- pq->tc_id, pq->wrr_group, pq->rl_valid);
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
@@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!b_rc)
return -EINVAL;
- /* clear the QM_PF runtime phase leftovers from previous init */
- qed_init_clear_rt_data(p_hwfn);
-
/* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn, p_ptt, false);
@@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
- rc = qed_dbg_alloc_user_data(p_hwfn);
+ rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
if (rc)
goto alloc_err;
}
@@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
- params.vport_rl_en = qm_info->vport_rl_en;
+ params.global_rl_en = qm_info->vport_rl_en;
params.vport_wfq_en = qm_info->vport_wfq_en;
params.port_params = qm_info->qm_port_params;
@@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
@@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
- int rc = 0, i;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u16 ether_type;
+ int rc = 0, i;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -3102,6 +3106,17 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
*/
qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ fw_overlays = cdev->fw_data->fw_overlays;
+ fw_overlays_len = cdev->fw_data->fw_overlays_len;
+ p_hwfn->fw_overlay_mem =
+ qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+ fw_overlays_len);
+ if (!p_hwfn->fw_overlay_mem) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate fw overlay memory\n");
+ goto load_err;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -3566,8 +3581,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id)
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
+ case QED_LL2_RAM_QUEUE:
+ return "LL2_RAM_QUEUE";
+ case QED_LL2_CTX_QUEUE:
+ return "LL2_CTX_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
@@ -3606,18 +3623,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
return 0;
}
+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+ enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= QED_NUM_HSI_DEFS) {
+ DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return qed_hsi_def_val[type][chip_id];
+}
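
The table is indexed by [definition type][chip], so each BB-vs-K2 ternary in the old code collapses into a single lookup. The NUM_OF_*() helpers used throughout this patch presumably wrap it along these lines (a sketch; the exact qed_hsi_def_type enumerator names are assumed):

#define NUM_OF_PBF_CMD_LINES(cdev) \
	qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(cdev) \
	qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_BTB_BLOCKS)
#define NUM_OF_QM_GLOBAL_RLS(cdev) \
	qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)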
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
-
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
- case QED_LL2_QUEUE:
- resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ case QED_LL2_RAM_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
break;
case QED_RDMA_CNQ_RAM:
/* No need for a case for QED_CMDQS_CQS since
@@ -3626,8 +3671,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
- resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
- : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ resc_max_val =
+ NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
break;
case QED_BDQ:
resc_max_val = BDQ_NUM_RESOURCES;
@@ -3660,28 +3705,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
switch (res_id) {
case QED_L2_QUEUE:
- *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
break;
case QED_VPORT:
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
break;
case QED_RSS_ENG:
- *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
break;
case QED_PQ:
- *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
@@ -3689,11 +3730,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
+ break;
+ case QED_LL2_RAM_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
- case QED_LL2_QUEUE:
- *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ case QED_LL2_CTX_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
@@ -3701,8 +3744,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
- RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
@@ -5087,11 +5129,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
- vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
min_pf_rate;
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
@@ -5102,7 +5144,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
int i;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
- p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
@@ -5118,7 +5160,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
qed_init_wfq_default_param(p_hwfn, min_pf_rate);
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 47376d4d071f..eb4808b3bf67 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_GRC
};
-/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
- * source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as
- * many times as required to fill the destination block. This is
- * used mostly to write a zeroed buffer to destination address
- * using DMA
- */
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_VF_SRC 0x00000002
-#define QED_DMAE_FLAG_VF_DST 0x00000004
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
-#define QED_DMAE_FLAG_PORT 0x00000010
-#define QED_DMAE_FLAG_PF_SRC 0x00000020
-#define QED_DMAE_FLAG_PF_DST 0x00000040
-
-struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
- u8 src_vfid;
- u8 dst_vfid;
- u8 port_id;
- u8 src_pfid;
- u8 dst_pfid;
-};
-
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index de31a382f58e..4c7fa391fd33 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
goto err;
}
p_cxt = cxt_info.p_cxt;
+ memset(p_cxt, 0, sizeof(*p_cxt));
+
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index cf3ceb62e397..4597015b8bff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -98,6 +98,7 @@ enum core_event_opcode {
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
CORE_EVENT_TX_QUEUE_UPDATE,
+ CORE_EVENT_QUEUE_STATS_QUERY,
MAX_CORE_EVENT_OPCODE
};
@@ -116,7 +117,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
-/* Ethernet TX Per Queue Stats */
+/* LL2 TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -124,13 +125,13 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
- __le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
@@ -147,6 +148,18 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Structure for doorbell data, in PWM mode, for RX producers update. */
+struct core_pwm_prod_update_data {
+ __le16 icid; /* internal CID */
+ u8 reserved0;
+ u8 params;
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
+ struct core_ll2_rx_prod prod; /* Producers */
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -156,6 +169,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
CORE_RAMROD_TX_QUEUE_UPDATE,
+ CORE_RAMROD_QUEUE_STATS_QUERY,
MAX_CORE_RAMROD_CMD_ID
};
@@ -236,7 +250,8 @@ struct core_rx_gsi_offload_cqe {
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 src_qp;
- __le32 reserved[3];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved;
};
/* Core RX CQE for Light L2 */
@@ -274,8 +289,11 @@ struct core_rx_start_ramrod_data {
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
+ u8 vport_id_valid;
+ u8 vport_id;
+ u8 zero_prod_flg;
u8 wipe_inner_vlan_pri_en;
- u8 reserved[5];
+ u8 reserved[2];
};
/* Ramrod data for rx queue stop ramrod */
@@ -352,8 +370,11 @@ struct core_tx_start_ramrod_data {
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
+ u8 ctx_stats_en;
+ u8 vport_id_valid;
u8 vport_id;
- u8 resrved[2];
+ u8 enforce_security_flag;
+ u8 reserved[7];
};
/* Ramrod data for tx queue stop ramrod */
@@ -385,7 +406,7 @@ struct ystorm_core_conn_st_ctx {
/* The core storm context for the Pstorm */
struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[20];
};
/* Core Slowpath Connection storm context of Xstorm */
@@ -761,7 +782,7 @@ struct e4_tstorm_core_conn_ag_ctx {
__le16 word1;
__le16 word2;
__le16 word3;
- __le32 reg9;
+ __le32 ll2_rx_prod;
__le32 reg10;
};
@@ -836,11 +857,16 @@ struct e4_ustorm_core_conn_ag_ctx {
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
+ __le32 reserved[40];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
+ __le32 reserved[20];
+};
+
+/* The core storm context for the Tstorm */
+struct tstorm_core_conn_st_ctx {
__le32 reserved[4];
};
@@ -857,6 +883,8 @@ struct e4_core_conn_context {
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
+ struct tstorm_core_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
};
struct eth_mstorm_per_pf_stat {
@@ -888,12 +916,21 @@ struct eth_pstorm_per_pf_stat {
struct regpair sent_gre_bytes;
struct regpair sent_vxlan_bytes;
struct regpair sent_geneve_bytes;
+ struct regpair sent_mpls_bytes;
+ struct regpair sent_gre_mpls_bytes;
+ struct regpair sent_udp_mpls_bytes;
struct regpair sent_gre_pkts;
struct regpair sent_vxlan_pkts;
struct regpair sent_geneve_pkts;
+ struct regpair sent_mpls_pkts;
+ struct regpair sent_gre_mpls_pkts;
+ struct regpair sent_udp_mpls_pkts;
struct regpair gre_drop_pkts;
struct regpair vxlan_drop_pkts;
struct regpair geneve_drop_pkts;
+ struct regpair mpls_drop_pkts;
+ struct regpair gre_mpls_drop_pkts;
+ struct regpair udp_mpls_drop_pkts;
};
/* Ethernet TX Per Queue Stats */
@@ -983,7 +1020,8 @@ union event_ring_data {
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
- __le16 reserved0;
+ u8 reserved0;
+ u8 vf_id;
__le16 echo;
u8 fw_return_code;
u8 flags;
@@ -1061,7 +1099,20 @@ enum malicious_vf_error_id {
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
ETH_PACKET_SIZE_TOO_LARGE,
- MAX_MALICIOUS_VF_ERROR_ID
+ CORE_ILLEGAL_VLAN_MODE,
+ CORE_ILLEGAL_NBDS,
+ CORE_FIRST_BD_WO_SOP,
+ CORE_INSUFFICIENT_BDS,
+ CORE_PACKET_TOO_SMALL,
+ CORE_ILLEGAL_INBAND_TAGS,
+ CORE_VLAN_INSERT_AND_INBAND_VLAN,
+ CORE_MTU_VIOLATION,
+ CORE_CONTROL_PACKET_VIOLATION,
+ CORE_ANTI_SPOOFING_ERR,
+ CORE_PACKET_SIZE_TOO_LARGE,
+ CORE_ILLEGAL_BD_FLAGS,
+ CORE_GSI_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID,
};
/* Mstorm non-triggering VF zone */
@@ -1367,6 +1418,16 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Xstorm non-triggering VF zone */
+struct xstorm_non_trigger_vf_zone {
+ struct regpair non_edpm_ack_pkts;
+};
+
+/* Tstorm VF zone */
+struct xstorm_vf_zone {
+ struct xstorm_non_trigger_vf_zone non_trigger;
+};
+
/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
@@ -1435,7 +1496,11 @@ struct dmae_cmd {
__le16 crc16;
__le16 crc16_c;
__le16 crc10;
- __le16 reserved;
+ __le16 error_bit_reserved;
+#define DMAE_CMD_ERROR_BIT_MASK 0x1
+#define DMAE_CMD_ERROR_BIT_SHIFT 0
+#define DMAE_CMD_RESERVED_MASK 0x7FFF
+#define DMAE_CMD_RESERVED_SHIFT 1
__le16 xsum16;
__le16 xsum8;
};
@@ -1566,6 +1631,41 @@ struct e4_ystorm_core_conn_ag_ctx {
__le32 reg3;
};
+/* DMAE parameters */
+struct qed_dmae_params {
+ u32 flags;
+/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1
+#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0
+#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1
+#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2
+#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1
+#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3
+#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4
+#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5
+#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6
+#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF
+#define QED_DMAE_PARAMS_RESERVED_SHIFT 7
+ u8 src_vfid;
+ u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
+ u8 reserved1;
+ __le16 reserved2;
+};
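
With the flags now defined as mask/shift pairs, callers zero the structure and set individual validity bits via SET_FIELD, as in the qed_llh_access_filter() hunk earlier in this patch:

	struct qed_dmae_params params;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
	params.dst_pfid = pfid;	/* pfid: destination PF id, assumed in scope */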
+
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1743,102 +1843,23 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
+/* Physical memory descriptor */
+struct phys_mem_desc {
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ u32 size; /* In bytes */
+};
+
+/* Virtual memory descriptor */
+struct virt_mem_desc {
+ void *ptr;
+ u32 size; /* In bytes */
+};
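
A hypothetical allocation helper for the physical descriptor, sketched with the standard DMA API (the real qed helpers are defined elsewhere):

static int qed_phys_mem_desc_alloc(struct qed_dev *cdev,
				   struct phys_mem_desc *desc, u32 size)
{
	desc->virt_addr = dma_alloc_coherent(&cdev->pdev->dev, size,
					     &desc->phys_addr, GFP_KERNEL);
	if (!desc->virt_addr)
		return -ENOMEM;

	desc->size = size;
	return 0;
}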
+
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
-enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_PTU = 0x560000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x5a0000,
- GRCBASE_YPLD = 0x5c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_RGFS = 0xf00000,
- GRCBASE_RGSRC = 0x320000,
- GRCBASE_TGFS = 0xd00000,
- GRCBASE_TGSRC = 0x322000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_LED = 0x6b8000,
- GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_PXPREQBUS = 0x56000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
- MAX_BLOCK_ADDR
-};
-
enum block_id {
BLOCK_GRC,
BLOCK_MISCS,
@@ -1893,8 +1914,6 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1908,12 +1927,9 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
+ BLOCK_MSTAT,
BLOCK_DBG,
BLOCK_NIG,
BLOCK_WOL,
@@ -1926,8 +1942,17 @@ enum block_id {
BLOCK_LED,
BLOCK_AVS_WRAP,
BLOCK_PXPREQBUS,
- BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
MAX_BLOCK_ID
};
@@ -1944,10 +1969,13 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -2031,20 +2059,54 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
-/* Debug Bus block data */
-struct dbg_bus_block {
- u8 num_of_lines;
- u8 has_latency_events;
- u16 lines_offset;
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
};
-/* Debug Bus block user data */
-struct dbg_bus_block_user_data {
- u8 num_of_lines;
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
u8 has_latency_events;
u16 names_offset;
};
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
/* Block Debug line data */
struct dbg_bus_line {
u8 data;
@@ -2197,22 +2259,33 @@ enum dbg_idle_chk_severity_types {
MAX_DBG_IDLE_CHK_SEVERITY_TYPES
};
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
/* Debug Bus block data */
struct dbg_bus_block_data {
- u16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
u8 line_num;
u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
};
-/* Debug Bus Clients */
enum dbg_bus_clients {
DBG_BUS_CLIENT_RBCN,
DBG_BUS_CLIENT_RBCP,
@@ -2253,11 +2326,10 @@ enum dbg_bus_constraint_ops {
/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
- u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
};
/* Debug Bus memory address */
@@ -2307,8 +2379,7 @@ struct dbg_bus_storm_data {
struct dbg_bus_data {
u32 app_version;
u8 state;
- u8 hw_dwords;
- u16 hw_id_mask;
+ u8 mode_256b_en;
u8 num_enabled_blocks;
u8 num_enabled_storms;
u8 target;
@@ -2319,67 +2390,21 @@ struct dbg_bus_data {
u8 adding_filter;
u8 filter_pre_trigger;
u8 filter_post_trigger;
- u16 reserved;
u8 trigger_en;
- struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_constraint_dword_mask;
u8 next_trigger_state;
u8 next_constraint_id;
- u8 unify_inputs;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_block_data blocks[132];
struct dbg_bus_storm_data storms[6];
};
-/* Debug bus filter types */
-enum dbg_bus_filter_types {
- DBG_BUS_FILTER_TYPE_OFF,
- DBG_BUS_FILTER_TYPE_PRE,
- DBG_BUS_FILTER_TYPE_POST,
- DBG_BUS_FILTER_TYPE_ON,
- MAX_DBG_BUS_FILTER_TYPES
-};
-
-/* Debug bus frame modes */
-enum dbg_bus_frame_modes {
- DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
- DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
- DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
- MAX_DBG_BUS_FRAME_MODES
-};
-
-/* Debug bus other engine mode */
-enum dbg_bus_other_engine_modes {
- DBG_BUS_OTHER_ENGINE_MODE_NONE,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
- MAX_DBG_BUS_OTHER_ENGINE_MODES
-};
-
-/* Debug bus post-trigger recording types */
-enum dbg_bus_post_trigger_types {
- DBG_BUS_POST_TRIGGER_RECORD,
- DBG_BUS_POST_TRIGGER_DROP,
- MAX_DBG_BUS_POST_TRIGGER_TYPES
-};
-
-/* Debug bus pre-trigger recording types */
-enum dbg_bus_pre_trigger_types {
- DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
- DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
- DBG_BUS_PRE_TRIGGER_DROP,
- MAX_DBG_BUS_PRE_TRIGGER_TYPES
-};
-
-/* Debug bus SEMI frame modes */
-enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
- MAX_DBG_BUS_SEMI_FRAME_MODES
-};
-
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE,
@@ -2397,7 +2422,9 @@ enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_DRA_W,
DBG_BUS_STORM_MODE_LD_ST_ADDR,
DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
DBG_BUS_STORM_MODE_FOC,
DBG_BUS_STORM_MODE_EXT_STORE,
MAX_DBG_BUS_STORM_MODES
@@ -2438,13 +2465,13 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU,
DBG_GRC_PARAM_DUMP_QM,
DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_DUMP_DORQ,
DBG_GRC_PARAM_DUMP_CFC,
DBG_GRC_PARAM_DUMP_IGU,
DBG_GRC_PARAM_DUMP_BRB,
DBG_GRC_PARAM_DUMP_BTB,
DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_RESERVD1,
DBG_GRC_PARAM_DUMP_MULD,
DBG_GRC_PARAM_DUMP_PRS,
DBG_GRC_PARAM_DUMP_DMAE,
@@ -2453,8 +2480,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_DIF,
DBG_GRC_PARAM_DUMP_STATIC,
DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_NUM_LCIDS,
- DBG_GRC_PARAM_NUM_LTIDS,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_EXCLUDE_ALL,
DBG_GRC_PARAM_CRASH,
DBG_GRC_PARAM_PARITY_SAFE,
@@ -2462,22 +2489,14 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
MAX_DBG_GRC_PARAMS
};
-/* Debug reset registers */
-enum dbg_reset_regs {
- DBG_RESET_REG_MISCS_PL_UA,
- DBG_RESET_REG_MISCS_PL_HV,
- DBG_RESET_REG_MISCS_PL_HV_2,
- DBG_RESET_REG_MISC_PL_UA,
- DBG_RESET_REG_MISC_PL_HV,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- DBG_RESET_REG_MISC_PL_PDA_VAUX,
- MAX_DBG_RESET_REGS
-};
-
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -2489,15 +2508,15 @@ enum dbg_status {
DBG_STATUS_INVALID_PCI_BUF_SIZE,
DBG_STATUS_PCI_BUF_ALLOC_FAILED,
DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_TOO_MANY_INPUTS,
- DBG_STATUS_INPUT_OVERLAP,
- DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
DBG_STATUS_STORM_ALREADY_ENABLED,
DBG_STATUS_STORM_NOT_ENABLED,
DBG_STATUS_BLOCK_ALREADY_ENABLED,
DBG_STATUS_BLOCK_NOT_ENABLED,
DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
DBG_STATUS_FILTER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_NOT_ENABLED,
@@ -2522,7 +2541,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED2,
+ DBG_STATUS_RESERVED0,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2530,10 +2549,15 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_RESERVED1,
DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
MAX_DBG_STATUS
};
@@ -2569,9 +2593,9 @@ struct dbg_tools_data {
struct dbg_bus_data bus;
struct idle_chk_data idle_chk;
u8 mode_enable[40];
- u8 block_in_reset[88];
+ u8 block_in_reset[132];
u8 chip_id;
- u8 platform_id;
+ u8 hw_type;
u8 num_ports;
u8 num_pfs_per_port;
u8 num_vfs;
@@ -2582,6 +2606,19 @@ struct dbg_tools_data {
u32 num_regs_read;
};
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2630,13 +2667,18 @@ struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per global RL init parameters */
+struct init_qm_global_rl_params {
+ u32 rate_limit;
+};
+
/* QM per-port init parameters */
struct init_qm_port_params {
- u8 active;
- u8 active_phys_tcs;
+ u16 active_phys_tcs;
u16 num_pbf_cmd_lines;
u16 num_btb_blocks;
- u16 reserved;
+ u8 active;
+ u8 reserved;
};
/* QM per-PQ init parameters */
@@ -2645,15 +2687,14 @@ struct init_qm_pq_params {
u8 tc_id;
u8 wrr_group;
u8 rl_valid;
+ u16 rl_id;
u8 port_id;
- u8 reserved0;
- u16 reserved1;
+ u8 reserved;
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
+ u16 wfq;
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2673,13 +2714,12 @@ struct init_qm_vport_params {
enum chip_ids {
CHIP_BB,
CHIP_K2,
- CHIP_RESERVED,
MAX_CHIP_IDS
};
struct fw_asserts_ram_section {
- u16 section_ram_line_offset;
- u16 section_ram_line_size;
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
u8 list_dword_offset;
u8 list_element_dword_size;
u8 list_num_elements;
@@ -2729,6 +2769,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_RESERVED6,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -2763,9 +2804,19 @@ enum bin_init_buffer_type {
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
BIN_BUF_INIT_IRO,
+ BIN_BUF_INIT_OVERLAYS,
MAX_BIN_INIT_BUFFER_TYPE
};
+/* FW overlay buffer header */
+struct fw_overlay_buf_hdr {
+ u32 data;
+#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF
+#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
+};
+
/* init array header: raw */
struct init_array_raw_hdr {
u32 data;
@@ -2859,10 +2910,8 @@ struct init_if_phase_op {
u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
u32 phase_data;
@@ -2991,9 +3040,11 @@ struct iro {
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_read_regs - Reads registers into a buffer (using GRC).
@@ -3037,7 +3088,6 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
* - val is outside the allowed boundaries
*/
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val);
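
With p_ptt dropped from the signature, configuring one of the newly added
GRC params is a single call; a hedged sketch (assuming a value of 1 means
"enabled" for boolean params):

	enum dbg_status rc;

	/* Request dumping of the ILT CDUC client in the next GRC dump */
	rc = qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC, 1);
	if (rc != DBG_STATUS_OK)
		return rc;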
/**
@@ -3358,20 +3408,36 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
char *format_str;
};
+/* MCP Trace metadata structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
/******************************** Constants **********************************/
#define MAX_NAME_LEN 16
@@ -3382,16 +3448,20 @@ struct mcp_trace_format {
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_dbg_alloc_user_data - Allocates user debug data.
*
* @param p_hwfn - HW device data
+ * @param user_data_ptr - OUT: a pointer to the allocated memory.
*/
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn);
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
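
Taken together, the two changed prototypes imply an init flow like the
sketch below; bin_fw_data is a hypothetical name for the caller's pointer
to the debug-arrays binary:

	void *dbg_user_data;
	enum dbg_status rc;

	/* Both calls now take the hwfn; the user data comes back through
	 * an out-pointer instead of being stored implicitly.
	 */
	rc = qed_dbg_user_set_bin_ptr(p_hwfn, bin_fw_data);
	if (rc != DBG_STATUS_OK)
		return rc;

	rc = qed_dbg_alloc_user_data(p_hwfn, &dbg_user_data);
	if (rc != DBG_STATUS_OK)
		return rc;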
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
@@ -3664,271 +3734,6 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
-/* Debug Bus blocks */
-static const u32 dbg_bus_blocks[] = {
- 0x0000000f, /* grc, bb, 15 lines */
- 0x0000000f, /* grc, k2, 15 lines */
- 0x00000000,
- 0x00000000, /* miscs, bb, 0 lines */
- 0x00000000, /* miscs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* misc, bb, 0 lines */
- 0x00000000, /* misc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbu, bb, 0 lines */
- 0x00000000, /* dbu, k2, 0 lines */
- 0x00000000,
- 0x000f0127, /* pglue_b, bb, 39 lines */
- 0x0036012a, /* pglue_b, k2, 42 lines */
- 0x00000000,
- 0x00000000, /* cnig, bb, 0 lines */
- 0x00120102, /* cnig, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* cpmu, bb, 0 lines */
- 0x00000000, /* cpmu, k2, 0 lines */
- 0x00000000,
- 0x00000001, /* ncsi, bb, 1 lines */
- 0x00000001, /* ncsi, k2, 1 lines */
- 0x00000000,
- 0x00000000, /* opte, bb, 0 lines */
- 0x00000000, /* opte, k2, 0 lines */
- 0x00000000,
- 0x00600085, /* bmb, bb, 133 lines */
- 0x00600085, /* bmb, k2, 133 lines */
- 0x00000000,
- 0x00000000, /* pcie, bb, 0 lines */
- 0x00e50033, /* pcie, k2, 51 lines */
- 0x00000000,
- 0x00000000, /* mcp, bb, 0 lines */
- 0x00000000, /* mcp, k2, 0 lines */
- 0x00000000,
- 0x01180009, /* mcp2, bb, 9 lines */
- 0x01180009, /* mcp2, k2, 9 lines */
- 0x00000000,
- 0x01210104, /* pswhst, bb, 4 lines */
- 0x01210104, /* pswhst, k2, 4 lines */
- 0x00000000,
- 0x01250103, /* pswhst2, bb, 3 lines */
- 0x01250103, /* pswhst2, k2, 3 lines */
- 0x00000000,
- 0x00340101, /* pswrd, bb, 1 lines */
- 0x00340101, /* pswrd, k2, 1 lines */
- 0x00000000,
- 0x01280119, /* pswrd2, bb, 25 lines */
- 0x01280119, /* pswrd2, k2, 25 lines */
- 0x00000000,
- 0x01410109, /* pswwr, bb, 9 lines */
- 0x01410109, /* pswwr, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* pswwr2, bb, 0 lines */
- 0x00000000, /* pswwr2, k2, 0 lines */
- 0x00000000,
- 0x001c0001, /* pswrq, bb, 1 lines */
- 0x001c0001, /* pswrq, k2, 1 lines */
- 0x00000000,
- 0x014a0015, /* pswrq2, bb, 21 lines */
- 0x014a0015, /* pswrq2, k2, 21 lines */
- 0x00000000,
- 0x00000000, /* pglcs, bb, 0 lines */
- 0x00120006, /* pglcs, k2, 6 lines */
- 0x00000000,
- 0x00100001, /* dmae, bb, 1 lines */
- 0x00100001, /* dmae, k2, 1 lines */
- 0x00000000,
- 0x015f0105, /* ptu, bb, 5 lines */
- 0x015f0105, /* ptu, k2, 5 lines */
- 0x00000000,
- 0x01640120, /* tcm, bb, 32 lines */
- 0x01640120, /* tcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* mcm, bb, 32 lines */
- 0x01640120, /* mcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ucm, bb, 32 lines */
- 0x01640120, /* ucm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* xcm, bb, 32 lines */
- 0x01640120, /* xcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ycm, bb, 32 lines */
- 0x01640120, /* ycm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* pcm, bb, 32 lines */
- 0x01640120, /* pcm, k2, 32 lines */
- 0x00000000,
- 0x01840062, /* qm, bb, 98 lines */
- 0x01840062, /* qm, k2, 98 lines */
- 0x00000000,
- 0x01e60021, /* tm, bb, 33 lines */
- 0x01e60021, /* tm, k2, 33 lines */
- 0x00000000,
- 0x02070107, /* dorq, bb, 7 lines */
- 0x02070107, /* dorq, k2, 7 lines */
- 0x00000000,
- 0x00600185, /* brb, bb, 133 lines */
- 0x00600185, /* brb, k2, 133 lines */
- 0x00000000,
- 0x020e0019, /* src, bb, 25 lines */
- 0x020c001a, /* src, k2, 26 lines */
- 0x00000000,
- 0x02270104, /* prs, bb, 4 lines */
- 0x02270104, /* prs, k2, 4 lines */
- 0x00000000,
- 0x022b0133, /* tsdm, bb, 51 lines */
- 0x022b0133, /* tsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* msdm, bb, 51 lines */
- 0x022b0133, /* msdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* usdm, bb, 51 lines */
- 0x022b0133, /* usdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* xsdm, bb, 51 lines */
- 0x022b0133, /* xsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* ysdm, bb, 51 lines */
- 0x022b0133, /* ysdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* psdm, bb, 51 lines */
- 0x022b0133, /* psdm, k2, 51 lines */
- 0x00000000,
- 0x025e010c, /* tsem, bb, 12 lines */
- 0x025e010c, /* tsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* msem, bb, 12 lines */
- 0x025e010c, /* msem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* usem, bb, 12 lines */
- 0x025e010c, /* usem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* xsem, bb, 12 lines */
- 0x025e010c, /* xsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* ysem, bb, 12 lines */
- 0x025e010c, /* ysem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* psem, bb, 12 lines */
- 0x025e010c, /* psem, k2, 12 lines */
- 0x00000000,
- 0x026a000d, /* rss, bb, 13 lines */
- 0x026a000d, /* rss, k2, 13 lines */
- 0x00000000,
- 0x02770106, /* tmld, bb, 6 lines */
- 0x02770106, /* tmld, k2, 6 lines */
- 0x00000000,
- 0x027d0106, /* muld, bb, 6 lines */
- 0x027d0106, /* muld, k2, 6 lines */
- 0x00000000,
- 0x02770005, /* yuld, bb, 5 lines */
- 0x02770005, /* yuld, k2, 5 lines */
- 0x00000000,
- 0x02830107, /* xyld, bb, 7 lines */
- 0x027d0107, /* xyld, k2, 7 lines */
- 0x00000000,
- 0x00000000, /* ptld, bb, 0 lines */
- 0x00000000, /* ptld, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* ypld, bb, 0 lines */
- 0x00000000, /* ypld, k2, 0 lines */
- 0x00000000,
- 0x028a010e, /* prm, bb, 14 lines */
- 0x02980110, /* prm, k2, 16 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb1, bb, 13 lines */
- 0x02a8000d, /* pbf_pb1, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb2, bb, 13 lines */
- 0x02a8000d, /* pbf_pb2, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* rpb, bb, 13 lines */
- 0x02a8000d, /* rpb, k2, 13 lines */
- 0x00000000,
- 0x00600185, /* btb, bb, 133 lines */
- 0x00600185, /* btb, k2, 133 lines */
- 0x00000000,
- 0x02b50117, /* pbf, bb, 23 lines */
- 0x02b50117, /* pbf, k2, 23 lines */
- 0x00000000,
- 0x02cc0006, /* rdif, bb, 6 lines */
- 0x02cc0006, /* rdif, k2, 6 lines */
- 0x00000000,
- 0x02d20006, /* tdif, bb, 6 lines */
- 0x02d20006, /* tdif, k2, 6 lines */
- 0x00000000,
- 0x02d80003, /* cdu, bb, 3 lines */
- 0x02db000e, /* cdu, k2, 14 lines */
- 0x00000000,
- 0x02e9010d, /* ccfc, bb, 13 lines */
- 0x02f60117, /* ccfc, k2, 23 lines */
- 0x00000000,
- 0x02e9010d, /* tcfc, bb, 13 lines */
- 0x02f60117, /* tcfc, k2, 23 lines */
- 0x00000000,
- 0x030d0133, /* igu, bb, 51 lines */
- 0x030d0133, /* igu, k2, 51 lines */
- 0x00000000,
- 0x03400106, /* cau, bb, 6 lines */
- 0x03400106, /* cau, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* rgfs, bb, 0 lines */
- 0x00000000, /* rgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* rgsrc, bb, 0 lines */
- 0x00000000, /* rgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgfs, bb, 0 lines */
- 0x00000000, /* tgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgsrc, bb, 0 lines */
- 0x00000000, /* tgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* umac, bb, 0 lines */
- 0x00120006, /* umac, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* xmac, bb, 0 lines */
- 0x00000000, /* xmac, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbg, bb, 0 lines */
- 0x00000000, /* dbg, k2, 0 lines */
- 0x00000000,
- 0x0346012b, /* nig, bb, 43 lines */
- 0x0346011d, /* nig, k2, 29 lines */
- 0x00000000,
- 0x00000000, /* wol, bb, 0 lines */
- 0x001c0002, /* wol, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* bmbn, bb, 0 lines */
- 0x00210008, /* bmbn, k2, 8 lines */
- 0x00000000,
- 0x00000000, /* ipc, bb, 0 lines */
- 0x00000000, /* ipc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* nwm, bb, 0 lines */
- 0x0371000b, /* nwm, k2, 11 lines */
- 0x00000000,
- 0x00000000, /* nws, bb, 0 lines */
- 0x037c0009, /* nws, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* ms, bb, 0 lines */
- 0x00120004, /* ms, k2, 4 lines */
- 0x00000000,
- 0x00000000, /* phy_pcie, bb, 0 lines */
- 0x00e5001a, /* phy_pcie, k2, 26 lines */
- 0x00000000,
- 0x00000000, /* led, bb, 0 lines */
- 0x00000000, /* led, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* avs_wrap, bb, 0 lines */
- 0x00000000, /* avs_wrap, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
-};
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3942,22 +3747,28 @@ static const u32 dbg_bus_blocks[] = {
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL
+
+/* Win 12 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL
+
+/* Win 13 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -3982,7 +3793,7 @@ struct qed_qm_common_rt_init_params {
u8 max_phys_tcs_per_port;
bool pf_rl_en;
bool pf_wfq_en;
- bool vport_rl_en;
+ bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
};
@@ -4001,11 +3812,10 @@ struct qed_qm_pf_rt_init_params {
u16 start_pq;
u16 num_pf_pqs;
u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
+ u16 start_vport;
+ u16 num_vports;
u16 pf_wfq;
u32 pf_rl;
- u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -4054,22 +3864,22 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ * @brief qed_init_global_rl - Initializes the rate limit of the specified
+ * rate limiter
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
- * @param link_speed - link speed in Mbps.
+ * @param rl_id - RL ID
+ * @param rate_limit - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed);
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rl_id, u32 rate_limit);
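
A minimal usage sketch of the replacement API: the limiter is now keyed by
a global rl_id rather than a VPORT, and link_speed is no longer passed
(rl_id and the 10 Gb/s cap below are arbitrary example inputs):

	int rc;

	/* Cap the given global rate limiter at 10 Gb/s (rate is in Mb/s) */
	rc = qed_init_global_rl(p_hwfn, p_ptt, rl_id, 10000);
	if (rc)
		return rc;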
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
@@ -4157,7 +3967,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
* @brief qed_gft_config - Enable and configure HW for GFT
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
@@ -4242,6 +4052,42 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
+/**
+ * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data
+ * @param fw_overlay_in_buf - the input FW overlay buffer.
+ * @param buf_size_in_bytes - the size of the input FW overlay buffer in
+ *        bytes. Must be aligned to dwords.
+ *
+ * @return a pointer to the allocated overlays memory,
+ *         or NULL in case of failure.
+ */
+struct phys_mem_desc *
+qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
+
+/**
+ * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ *
+ * @param p_hwfn - HW device data.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param fw_overlay_mem - the allocated FW overlay memory.
+ */
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem);
+
+/**
+ * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data.
+ * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ */
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem);
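
The three prototypes above imply the following lifecycle; overlays_buf and
overlays_size_in_bytes are hypothetical names for the BIN_BUF_INIT_OVERLAYS
image and its dword-aligned size:

	struct phys_mem_desc *fw_overlay_mem;

	fw_overlay_mem = qed_fw_overlay_mem_alloc(p_hwfn, overlays_buf,
						  overlays_size_in_bytes);
	if (!fw_overlay_mem)
		return -ENOMEM;

	/* Program the per-storm overlay buffer addresses into RAM */
	qed_fw_overlay_init_ram(p_hwfn, p_ptt, fw_overlay_mem);

	/* ... and at teardown ... */
	qed_fw_overlay_mem_free(p_hwfn, fw_overlay_mem);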
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
@@ -4282,851 +4128,807 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[8].base + ((pq_id) * IRO[8].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
+
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+ (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+ (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
/* Ustorm LiteL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+ (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
/* Pstorm LiteL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+ (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode.
+ * mode
*/
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+ (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[28].base + ((queue_id) * IRO[28].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
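
These IRO-based macros yield offsets inside the owning storm's RAM; a
typical consumer adds the offset to the matching GTT BAR0 window. A hedged
sketch (pairing with GTT_BAR0_MAP_REG_MSDM_RAM is inferred from the Mstorm
naming):

	void __iomem *prod_addr;

	/* Address of the Rx producer for this PF queue in Mstorm RAM */
	prod_addr = (u8 __iomem *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_ETH_PF_PRODS_OFFSET(queue_id);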
/* Mstorm pf statistics */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+ (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
- (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[31].base + ((pf_id) * IRO[31].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
/* Pstorm pf statistics */
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[27].base + ((eth_type_id) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[34].base + ((eth_type_id) * IRO[34].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update.
+ * Use eth_tstorm_rss_update_data for update
*/
#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[30].base + ((pf_id) * IRO[30].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[31].base + ((queue_id) * IRO[31].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
+ (IRO[38].base + ((queue_id) * IRO[38].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[32].base + ((rss_id) * IRO[32].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+ (IRO[39].base + ((rss_id) * IRO[39].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[33].base + ((rss_id) * IRO[33].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
+ (IRO[40].base + ((rss_id) * IRO[40].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
+ (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id.
+ * BDqueue-id
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
+ ((bdq_id) * IRO[43].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
+ ((bdq_id) * IRO[44].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[45].base + ((storage_func_id) * IRO[45].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[46].base + ((storage_func_id) * IRO[46].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[47].base + ((storage_func_id) * IRO[47].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[48].base + ((storage_func_id) * IRO[48].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[49].base + ((storage_func_id) * IRO[49].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[50].base + ((storage_func_id) * IRO[50].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[44].base + ((pf_id) * IRO[44].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+ (IRO[51].base + ((pf_id) * IRO[51].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[45].base + ((pf_id) * IRO[45].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
+ (IRO[52].base + ((pf_id) * IRO[52].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+ (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
/* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[48].base + ((pf_id) * IRO[48].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+ (IRO[55].base + ((pf_id) * IRO[55].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
/* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[49].base + ((pf_id) * IRO[49].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+ (IRO[56].base + ((pf_id) * IRO[56].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
/* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[50].base + ((pf_id) * IRO[50].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+ (IRO[57].base + ((pf_id) * IRO[57].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
/* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+ (IRO[58].base + ((pf_id) * IRO[58].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
/* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+ (IRO[59].base + ((pf_id) * IRO[59].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
/* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[53].base + ((pf_id) * IRO[53].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
+ (IRO[60].base + ((pf_id) * IRO[60].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[54].base + ((pf_id) * IRO[54].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
+ (IRO[61].base + ((pf_id) * IRO[61].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
- (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\
+ (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[59].base + ((roce_pf_id) * IRO[59].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
-
-static const struct iro iro_arr[60] = {
- {0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb8, 0x88, 0x0, 0x0, 0x88},
- {0x6530, 0x20, 0x0, 0x0, 0x20},
- {0xb00, 0x8, 0x0, 0x0, 0x4},
- {0xa80, 0x8, 0x0, 0x0, 0x4},
- {0x0, 0x8, 0x0, 0x0, 0x2},
- {0x80, 0x8, 0x0, 0x0, 0x4},
- {0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4c48, 0x0, 0x0, 0x0, 0x78},
- {0x3e38, 0x0, 0x0, 0x0, 0x78},
- {0x3ef8, 0x0, 0x0, 0x0, 0x78},
- {0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x4998, 0x0, 0x0, 0x0, 0x78},
- {0x7f50, 0x0, 0x0, 0x0, 0x78},
- {0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x6210, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
- {0xa990, 0x30, 0x0, 0x0, 0x30},
- {0x4b68, 0x80, 0x0, 0x0, 0x40},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a8, 0x80, 0x4, 0x0, 0x4},
- {0xc7d0, 0x0, 0x0, 0x0, 0x4},
- {0x4ba8, 0x80, 0x0, 0x0, 0x20},
- {0x8158, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
- {0x4090, 0x80, 0x0, 0x0, 0x38},
- {0xfea8, 0x78, 0x0, 0x0, 0x78},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaf20, 0x0, 0x0, 0x0, 0xf0},
- {0xb010, 0x8, 0x0, 0x0, 0x8},
- {0xc00, 0x8, 0x0, 0x0, 0x8},
- {0x1f8, 0x8, 0x0, 0x0, 0x8},
- {0xac0, 0x8, 0x0, 0x0, 0x8},
- {0x2578, 0x8, 0x0, 0x0, 0x8},
- {0x24f8, 0x8, 0x0, 0x0, 0x8},
- {0x0, 0x8, 0x0, 0x0, 0x8},
- {0x400, 0x18, 0x8, 0x0, 0x8},
- {0xb78, 0x18, 0x8, 0x0, 0x2},
- {0xd898, 0x50, 0x0, 0x0, 0x3c},
- {0x12908, 0x18, 0x0, 0x0, 0x10},
- {0x11aa8, 0x40, 0x0, 0x0, 0x18},
- {0xa588, 0x50, 0x0, 0x0, 0x20},
- {0x8f00, 0x40, 0x0, 0x0, 0x28},
- {0x10e30, 0x18, 0x0, 0x0, 0x10},
- {0xde48, 0x48, 0x0, 0x0, 0x38},
- {0x11298, 0x20, 0x0, 0x0, 0x20},
- {0x40c8, 0x80, 0x0, 0x0, 0x10},
- {0x5048, 0x10, 0x0, 0x0, 0x10},
- {0xc748, 0x8, 0x0, 0x0, 0x1},
- {0xa928, 0x8, 0x0, 0x0, 0x1},
- {0x11a30, 0x8, 0x0, 0x0, 0x1},
- {0xf030, 0x8, 0x0, 0x0, 0x1},
- {0x13028, 0x8, 0x0, 0x0, 0x1},
- {0x12c58, 0x8, 0x0, 0x0, 0x1},
- {0xc9b8, 0x30, 0x0, 0x0, 0x10},
- {0xed90, 0x28, 0x0, 0x0, 0x28},
- {0xad20, 0x18, 0x0, 0x0, 0x18},
- {0xaea0, 0x8, 0x0, 0x0, 0x8},
- {0x13c38, 0x8, 0x0, 0x0, 0x8},
- {0x13c50, 0x18, 0x0, 0x0, 0x18},
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+
+/* IRO Array */
+static const u32 iro_arr[] = {
+ 0x00000000, 0x00000000, 0x00080000,
+ 0x00003288, 0x00000088, 0x00880000,
+ 0x000058e8, 0x00000020, 0x00200000,
+ 0x00000b00, 0x00000008, 0x00040000,
+ 0x00000a80, 0x00000008, 0x00040000,
+ 0x00000000, 0x00000008, 0x00020000,
+ 0x00000080, 0x00000008, 0x00040000,
+ 0x00000084, 0x00000008, 0x00020000,
+ 0x00005718, 0x00000004, 0x00040000,
+ 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00003e40, 0x00000000, 0x00780000,
+ 0x00004480, 0x00000000, 0x00780000,
+ 0x00003210, 0x00000000, 0x00780000,
+ 0x00003b50, 0x00000000, 0x00780000,
+ 0x00007f58, 0x00000000, 0x00780000,
+ 0x00005f58, 0x00000000, 0x00080000,
+ 0x00007100, 0x00000000, 0x00080000,
+ 0x0000aea0, 0x00000000, 0x00080000,
+ 0x00004398, 0x00000000, 0x00080000,
+ 0x0000a5a0, 0x00000000, 0x00080000,
+ 0x0000bde8, 0x00000000, 0x00080000,
+ 0x00000020, 0x00000004, 0x00040000,
+ 0x000056c8, 0x00000010, 0x00100000,
+ 0x0000c210, 0x00000030, 0x00300000,
+ 0x0000b088, 0x00000038, 0x00380000,
+ 0x00003d20, 0x00000080, 0x00400000,
+ 0x0000bf60, 0x00000000, 0x00040000,
+ 0x00004560, 0x00040080, 0x00040000,
+ 0x000001f8, 0x00000004, 0x00040000,
+ 0x00003d60, 0x00000080, 0x00200000,
+ 0x00008960, 0x00000040, 0x00300000,
+ 0x0000e840, 0x00000060, 0x00600000,
+ 0x00004618, 0x00000080, 0x00380000,
+ 0x00010738, 0x000000c0, 0x00c00000,
+ 0x000001f8, 0x00000002, 0x00020000,
+ 0x0000a2a0, 0x00000000, 0x01080000,
+ 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x000001c0, 0x00000008, 0x00080000,
+ 0x000001f8, 0x00000008, 0x00080000,
+ 0x00000ac0, 0x00000008, 0x00080000,
+ 0x00002578, 0x00000008, 0x00080000,
+ 0x000024f8, 0x00000008, 0x00080000,
+ 0x00000280, 0x00000008, 0x00080000,
+ 0x00000680, 0x00080018, 0x00080000,
+ 0x00000b78, 0x00080018, 0x00020000,
+ 0x0000c640, 0x00000050, 0x003c0000,
+ 0x00012038, 0x00000018, 0x00100000,
+ 0x00011b00, 0x00000040, 0x00180000,
+ 0x000095d0, 0x00000050, 0x00200000,
+ 0x00008b10, 0x00000040, 0x00280000,
+ 0x00011640, 0x00000018, 0x00100000,
+ 0x0000c828, 0x00000048, 0x00380000,
+ 0x00011710, 0x00000020, 0x00200000,
+ 0x00004650, 0x00000080, 0x00100000,
+ 0x00003618, 0x00000010, 0x00100000,
+ 0x0000a968, 0x00000008, 0x00010000,
+ 0x000097a0, 0x00000008, 0x00010000,
+ 0x00011990, 0x00000008, 0x00010000,
+ 0x0000f018, 0x00000008, 0x00010000,
+ 0x00012628, 0x00000008, 0x00010000,
+ 0x00011da8, 0x00000008, 0x00010000,
+ 0x0000aa78, 0x00000030, 0x00100000,
+ 0x0000d768, 0x00000028, 0x00280000,
+ 0x00009a58, 0x00000018, 0x00180000,
+ 0x00009bd8, 0x00000008, 0x00080000,
+ 0x00013a18, 0x00000008, 0x00080000,
+ 0x000126e8, 0x00000018, 0x00180000,
+ 0x0000e608, 0x00500288, 0x00100000,
+ 0x00012970, 0x00000138, 0x00280000,
};
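
The removed table listed the struct iro fields ({base, m1, m2, m3, size})
explicitly; the new iro_arr packs each entry into three dwords. Comparing
entries against the old table (e.g. the 44th entry,
0x00000680/0x00080018/0x00080000, versus the old {0x400, 0x18, 0x8, 0x0,
0x8} for the same macro) suggests the decoding sketched below; the exact
layout is an assumption:

	/* Hypothetical unpacking of one 3-dword iro_arr entry */
	static void iro_unpack(const u32 *ent, struct iro *iro)
	{
		iro->base = ent[0];		/* dword 0: base offset */
		iro->m1 = ent[1] & 0xffff;	/* dword 1 low: m1 */
		iro->m2 = ent[1] >> 16;		/* dword 1 high: m2 */
		iro->m3 = ent[2] & 0xffff;	/* dword 2 low: m3 */
		iro->size = ent[2] >> 16;	/* dword 2 high: size */
	}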
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
-#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
-#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
-#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
-#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
-#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
-#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
-#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
-#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
-#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6527
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6529
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
-#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
-#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
-#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
-#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
-#define QM_REG_PTRTBLOTHER_RT_SIZE 256
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
-#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
-#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
-#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
-#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
-#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
-#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
-#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
-#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
-#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
-#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
-#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
-#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
-#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
-#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
-#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
-#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
-#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
-#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
-#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
-#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
-#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
-#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
-#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
-#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
-#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
-#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
-#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
-#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
-#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
-#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
-#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
-#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
-#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
-#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
-#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
-#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
-#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
-#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
-#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
-#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
-#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
-#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
-#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
-#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
-#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
-#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
-#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
-#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
-#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
-#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
-#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
-#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
-#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
-#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
-#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
-#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
-#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
-#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
-#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
-#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
-#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
-#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
-#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
-#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 35389
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 35405
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 35439
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
-#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 36209
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 37233
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 37745
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 38257
-#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
-
-#define RUNTIME_ARRAY_SIZE 43022
-
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 1498
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929
+#define SRC_REG_FIRSTFREE_RT_OFFSET 5930
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 5932
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 5934
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29358
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29425
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29426
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29427
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29428
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29429
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29430
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29431
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29432
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29433
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29434
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29435
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29436
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29437
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29438
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29439
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29440
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29441
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29442
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29443
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29444
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29445
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29446
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29447
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29448
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29449
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29450
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29451
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29452
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29453
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29454
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29455
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29456
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29457
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29458
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29459
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29460
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29461
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29462
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29463
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29464
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29465
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29466
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29467
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29468
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29469
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29470
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29471
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29472
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29473
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29474
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29475
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29476
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29477
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29478
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29479
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29480
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29481
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29482
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29483
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29484
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29485
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29486
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29487
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29488
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30029
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30286
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30288
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30320
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30336
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30370
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 30530
+#define QM_REG_WFQVPENABLE_RT_OFFSET 30531
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31044
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
+
+#define RUNTIME_ARRAY_SIZE 34472
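/*
 * Illustrative sketch, not part of the patch: each *_RT_OFFSET above is
 * an index into the per-hwfn runtime init array, so RUNTIME_ARRAY_SIZE
 * must equal the last offset plus one (34471 + 1 = 34472). Assuming the
 * STORE_RT_REG() helper from qed.h, a runtime register is staged along
 * these lines before the init code flushes the array to the chip:
 */
static inline void example_stage_rt_reg(struct qed_hwfn *p_hwfn)
{
	/* Stage a value at its runtime-array slot; the init flow later
	 * writes every valid slot out to the hardware in one pass.
	 */
	STORE_RT_REG(p_hwfn, NIG_REG_TX_EDPM_CTRL_RT_OFFSET, 0x1);
}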
/* Init Callbacks */
#define DMAE_READY_CB 0
@@ -5648,9 +5450,9 @@ struct e4_eth_conn_context {
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
@@ -5680,6 +5482,16 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
ETH_FILTERS_GFT_UPDATE_FAIL,
+ ETH_RX_QUEUE_FAIL_LOAD_VF_DATA,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR,
MAX_ETH_ERROR_CODE
};
@@ -5703,6 +5515,11 @@ enum eth_event_opcode {
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
ETH_EVENT_TX_QUEUE_UPDATE,
+ ETH_EVENT_RGFS_ADD_FILTER,
+ ETH_EVENT_RGFS_DEL_FILTER,
+ ETH_EVENT_TGFS_ADD_FILTER,
+ ETH_EVENT_TGFS_DEL_FILTER,
+ ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_EVENT_OPCODE
};
@@ -5795,18 +5612,31 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
+ ETH_RAMROD_RGFS_FILTER_ADD,
+ ETH_RAMROD_RGFS_FILTER_DEL,
+ ETH_RAMROD_TGFS_FILTER_ADD,
+ ETH_RAMROD_TGFS_FILTER_DEL,
+ ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_RAMROD_CMD_ID
};
/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
-#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
-#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
-#define ETH_RETURN_CODE_RESERVED_MASK 0x3
-#define ETH_RETURN_CODE_RESERVED_SHIFT 5
-#define ETH_RETURN_CODE_RX_TX_MASK 0x1
-#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x1
+#define ETH_RETURN_CODE_RESERVED_SHIFT 6
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
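/*
 * Illustrative sketch, not part of the patch: err_code grows from 5 to
 * 6 bits here, which is why the reserved field shrinks and moves up.
 * Using qed's generic GET_FIELD() accessor, which token-pastes the
 * *_MASK and *_SHIFT names, the widened code is decoded as:
 */
static inline u8 example_eth_err_code(struct eth_return_code *ret)
{
	/* Extracts bits [5:0] via the new 0x3F mask at shift 0 */
	return GET_FIELD(ret->value, ETH_RETURN_CODE_ERR_CODE);
}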
+/* tx destination enum */
+enum eth_tx_dst_mode_config_enum {
+ ETH_TX_DST_MODE_CONFIG_DISABLE,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
+ MAX_ETH_TX_DST_MODE_CONFIG_ENUM
};
/* What to do in case an error occurs */
@@ -5833,8 +5663,10 @@ struct eth_tx_err_vals {
#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8
};
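/*
 * Illustrative sketch, not part of the patch: the new ILLEGAL_BD_FLAGS
 * check takes bit 7 and the reserved field becomes 8 bits at shift 8.
 * Assuming SET_FIELD() and a __le16 backing member (named "values"
 * here only for illustration), a driver opts into the check like so:
 */
static inline void example_tx_err_cfg(struct eth_tx_err_vals *err)
{
	u16 v = 0;

	/* Treat BDs carrying illegal flag combinations as errors */
	SET_FIELD(v, ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS, 1);
	err->values = cpu_to_le16(v);	/* member name assumed */
}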
/* vport rss configuration data */
@@ -5864,7 +5696,6 @@ struct eth_vport_rss_config {
u8 tbl_size;
__le32 reserved2[2];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
-
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
__le32 reserved3[2];
};
@@ -6066,7 +5897,7 @@ struct rx_update_gft_filter_data {
u8 inner_vlan_removal_en;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for tx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
u8 sb_index;
@@ -6079,16 +5910,14 @@ struct tx_queue_start_ramrod_data {
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5
u8 pxp_st_hint;
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
@@ -6144,18 +5973,22 @@ struct vport_start_ramrod_data {
__le16 default_vlan;
u8 tx_switching_en;
u8 anti_spoofing_en;
-
u8 default_vlan_en;
-
u8 handle_ptp_pkts;
u8 silent_vlan_removal_en;
u8 untagged;
struct eth_tx_err_vals tx_err_behav;
-
u8 zero_placement_offset;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
+ u8 reserved0;
+ u8 reserved1;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
u8 wipe_inner_vlan_pri_en;
+ u8 reserved2[2];
struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
@@ -6715,19 +6548,6 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
-/* GFT CAM line struct */
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
@@ -6757,10 +6577,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
@@ -7039,6 +6855,11 @@ struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The roce task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[6];
+};
+
struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
@@ -7048,8 +6869,8 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
@@ -7079,29 +6900,29 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
- __le32 sq_cons;
- __le32 dif_runt_value;
+ __le32 dif_rxmit_cons;
+ __le32 dif_rxmit_prod;
__le32 sge_index;
- __le32 reg5;
+ __le32 sq_cons;
u8 byte2;
u8 byte3;
- __le16 word1;
- __le16 word2;
+ __le16 dif_write_cons;
+ __le16 dif_write_prod;
__le16 word3;
- __le32 reg6;
- __le32 reg7;
+ __le32 dif_error_buffer_address_lo;
+ __le32 dif_error_buffer_address_hi;
};
/* RDMA task context */
@@ -7112,6 +6933,8 @@ struct e4_rdma_task_context {
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
};
@@ -7147,7 +6970,12 @@ struct rdma_create_cq_ramrod_data {
u8 pbl_log_page_size;
u8 toggle_bit;
__le16 int_timeout;
- __le16 reserved1;
+ u8 vf_id;
+ u8 flags;
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1
};
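/*
 * Illustrative sketch, not part of the patch: the old __le16 reserved1
 * is split into a vf_id byte plus a flags byte whose bit 0 marks the
 * vf_id as valid. Assuming SET_FIELD(), the new fields are filled as:
 */
static inline void example_cq_for_vf(struct rdma_create_cq_ramrod_data *p,
				     u8 vf)
{
	p->vf_id = vf;
	/* flags is a plain u8, so SET_FIELD can operate on it directly */
	SET_FIELD(p->flags, RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID, 1);
}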
/* rdma deregister tid ramrod data */
@@ -7191,6 +7019,7 @@ enum rdma_fw_return_code {
RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
RDMA_RETURN_RESIZE_CQ_ERR,
RDMA_RETURN_NIG_DRAIN_REQ,
+ RDMA_RETURN_GENERAL_ERR,
MAX_RDMA_FW_RETURN_CODE
};
@@ -7204,7 +7033,10 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- __le32 reserved;
+ u8 searcher_mode;
+ u8 pvrdma_mode;
+ u8 max_num_ns_log;
+ u8 reserved;
};
/* rdma function init ramrod data */
@@ -7294,16 +7126,20 @@ struct rdma_resize_cq_ramrod_data {
#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
struct regpair pbl_addr;
struct regpair output_params_addr;
+ u8 vf_id;
+ u8 reserved1[7];
};
-/* The rdma storm context of Mstorm */
+/* The rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
@@ -7350,6 +7186,7 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
+/* The rdma XRC SRQ context */
struct rdma_xrc_srq_context {
struct regpair temp[9];
};
@@ -7531,12 +7368,12 @@ struct e4_xstorm_roce_conn_ag_ctx {
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -7860,9 +7697,9 @@ struct mstorm_roce_conn_st_ctx {
struct regpair temp[6];
};
-/* The roce storm context of Ystorm */
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
- struct regpair temp[12];
+ struct regpair temp[14];
};
/* roce connection context */
@@ -7880,6 +7717,7 @@ struct e4_roce_conn_context {
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct regpair mstorm_st_padding[2];
struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* roce cqes statistics */
@@ -7934,12 +7772,17 @@ struct roce_create_qp_req_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id;
- u8 reserved3[6];
+ u8 vf_id;
+ u8 vport_id;
u8 flags2;
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 name_space;
+ u8 reserved3[3];
__le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -7967,8 +7810,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7995,10 +7840,14 @@ struct roce_create_qp_resp_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
__le16 low_latency_phy_queue;
- u8 reserved2[2];
+ u8 vf_id;
+ u8 vport_id;
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved3[3];
};
/* roce DCQCN received statistics */
@@ -8032,6 +7881,8 @@ struct roce_destroy_qp_resp_output_params {
/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
+ __le32 src_qp_id;
+ __le32 reserved;
};
/* roce error statistics */
@@ -8115,8 +7966,8 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
@@ -8162,8 +8013,8 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
@@ -8204,7 +8055,7 @@ struct roce_query_qp_req_ramrod_data {
/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
- __le32 err_flag;
+ __le32 flags;
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
@@ -8271,12 +8122,12 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -8649,8 +8500,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
u8 flags5;
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
@@ -8663,13 +8514,13 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
+ __le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
__le32 orq_prod;
__le32 reg4;
- __le32 reg5;
- __le32 reg6;
+ __le32 dif_acked_cnt;
+ __le32 dif_cnt;
__le32 reg7;
__le32 reg8;
u8 tx_cqe_error_type;
@@ -8680,7 +8531,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le16 snd_sq_cons;
__le16 conn_dpi;
__le16 force_comp_cons;
- __le32 reg9;
+ __le32 dif_rxmit_acked_cnt;
__le32 reg10;
};
@@ -8955,10 +8806,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9184,10 +9035,10 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9914,7 +9765,7 @@ struct mstorm_iwarp_conn_st_ctx {
/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
- __le32 reserved[24];
+ struct regpair reserved[14];
};
/* iwarp connection context */
@@ -9932,6 +9783,7 @@ struct e4_iwarp_conn_context {
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
@@ -9984,7 +9836,8 @@ enum iwarp_eqe_async_opcode {
struct iwarp_eqe_data_mpa_async_completion {
__le16 ulp_data_len;
- u8 reserved[6];
+ u8 rtr_type_sent;
+ u8 reserved[5];
};
struct iwarp_eqe_data_tcp_async_completion {
@@ -10009,7 +9862,7 @@ enum iwarp_eqe_sync_opcode {
/* iWARP EQE completion status */
enum iwarp_fw_return_code {
- IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
IWARP_CONN_ERROR_MPA_ERROR_REJECT,
@@ -10178,8 +10031,8 @@ struct iwarp_rxmit_stats_drv {
* offload ramrod.
*/
struct iwarp_tcp_offload_ramrod_data {
- struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
+ struct iwarp_offload_params iwarp;
};
/* iWARP MPA negotiation types */
@@ -11471,8 +11324,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
u8 flags3;
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
@@ -11494,8 +11347,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
@@ -11727,7 +11580,7 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
/* The trace in the buffer */
#define MFW_TRACE_EVENTID_MASK 0x00ffff
#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
#define MFW_TRACE_ENTRY_SIZE 3
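For reference, a minimal sketch of decoding one trace word with the masks above; trace_word is a hypothetical local holding a raw entry:

u32 event_id = trace_word & MFW_TRACE_EVENTID_MASK;
u32 prm_size = (trace_word & MFW_TRACE_PRM_SIZE_MASK) >>
	       MFW_TRACE_PRM_SIZE_OFFSET;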
struct mcp_trace {
@@ -12485,6 +12338,11 @@ enum resource_id_enum {
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -12675,7 +12533,10 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
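A sketch of packing the 24-bit NVM offset and 8-bit length into one mailbox parameter using the masks above; nvm_offset and nvm_len are hypothetical locals, and the masking is written out by hand rather than with a helper:

u32 param = (nvm_offset & DRV_MB_PARAM_NVM_OFFSET_MASK) |
	    ((nvm_len << DRV_MB_PARAM_NVM_LEN_OFFSET) &
	     DRV_MB_PARAM_NVM_LEN_MASK);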
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
@@ -13436,6 +13297,21 @@ enum nvm_image_type {
NVM_TYPE_FCOE_CFG = 0x1f,
NVM_TYPE_ETH_PHY_FW1 = 0x20,
NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
NVM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a4de9e3ef72c..4ab8cfaf63d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
- ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+ ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
- opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
- : DMAE_CMD_SRC_MASK_PCIE) <<
- DMAE_CMD_SRC_SHIFT;
- src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
- p_params->src_pfid : p_hwfn->rel_pf_id;
- opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC,
+ (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
- opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
- : DMAE_CMD_DST_MASK_PCIE) <<
- DMAE_CMD_DST_SHIFT;
- dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
- p_params->dst_pfid : p_hwfn->rel_pf_id;
- opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST,
+ (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
+
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
- opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
- opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
- opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+ /* swapping mode 3 - big endian */
+ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
- port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
- p_params->port_id : p_hwfn->port_id;
- opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
+ p_params->port_id : p_hwfn->port_id;
+ SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
- opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
- DMAE_CMD_DST_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
- opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
} else {
- opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
-
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
- opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
} else {
- opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d6430dfebd83..2f1049b0b93a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -44,9 +44,9 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
- {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+ {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
@@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
@@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
-
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
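A worked instance of the macro above, using the default pure-LB line count (illustrative only):

/* QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT,
 * i.e. a credit of 292 with the sign bit set.
 */
u32 pure_lb_crd = QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES);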
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
ext_voq, wrr) \
do { \
typeof(map) __map; \
memset(&__map, 0, sizeof(__map)); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
- rl_valid); \
+ rl_valid ? 1 : 0);\
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
vp_pq_id); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
- ((rl_valid) << 22) | ((rl) << 24))
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
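Note the split encoding of the RL id: its low byte sits in bits 24-31 and its remaining high bits start at bit 9. A worked example with assumed values:

/* VPORT 5, PF 1, TC 3, port 0, RL valid, RL id 0x123: bit 22 carries
 * the valid flag, bits 24-31 get 0x23 (low byte of the RL id) and
 * bit 9 onward gets 0x1 (its high bits).
 */
u32 pq_info = PQ_INFO_ELEMENT(5, 1, 3, 0, 1, 0x123);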
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+ XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id)
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
STORE_RT_REG(p_hwfn,
QM_REG_RLPFVOQENABLE_RT_OFFSET,
(u32)voq_bit_mask);
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
/* Write RL period */
STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
ext_voq = qed_get_ext_voq(p_hwfn,
port_id,
PURE_LB_TC, max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn,
- ext_voq, PBF_CMDQ_PURE_LB_LINES);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
}
}
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled and allocated for each TC.
+ */
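To make the calculation above concrete, a numeric sketch with assumed inputs (B = 266 blocks, C = 3 TCs):

/* a. Global headroom: 38 blocks, leaving B = 266 - 38 = 228.
 * b. Pure LB VOQ: MAX(38, 228 / (3 + 0.7)) = MAX(38, 61) = 61 blocks,
 *    leaving B = 228 - 61 = 167.
 * c. Each physical TC: 167 / 3 = 55 blocks (integer division).
 */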
static void qed_btb_blocks_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
@@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init(
}
}
+/* Prepare runtime init values for all global rate limiters (RLs).
+ * Set the max link speed (100 Gbps) per rate limiter.
+ * Return -1 on error.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
- u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u16 *p_first_tx_pq_id, vport_id_in_pf;
struct qm_rf_pq_map_e4 tx_pq_map;
- bool is_vf_pq, rl_valid;
- u16 *p_first_tx_pq_id;
+ u8 tc_id = pq_params[i].tc_id;
+ bool is_vf_pq;
+ u8 ext_voq;
ext_voq = qed_get_ext_voq(p_hwfn,
pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
- rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
map_val);
}
- /* Check RL ID */
- if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- rl_valid = false;
- }
-
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
E4,
pq_id,
- rl_valid ? 1 : 0,
*p_first_tx_pq_id,
- rl_valid ? pq_params[i].vport_id : 0,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id,
ext_voq, pq_params[i].wrr_group);
/* Set PQ base address */
@@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
p_params->pf_id,
tc_id,
pq_params[i].port_id,
- rl_valid ? 1 : 0,
- rl_valid ?
- pq_params[i].vport_id : 0);
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
pq_info);
}
@@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 num_vports,
+ u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id;
+ u16 vport_pq_id, i;
u32 inc_val;
- u8 tc, i;
+ u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].vport_wfq)
+ if (!vport_params[i].wfq)
continue;
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration\n");
@@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
-/* Prepare VPORT RL runtime init values for the specified VPORTs.
- * Return -1 on error.
- */
-static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
- u8 num_vports,
- u32 link_speed,
- struct init_qm_vport_params *vport_params)
-{
- u8 i, vport_id;
- u32 inc_val;
-
- if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- /* Go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
- vport_params[i].vport_rl :
- link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_VP_RL_UPPER_BOUND(link_speed) |
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
- inc_val);
- }
-
- return 0;
-}
-
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
- /* Init AFullOprtnstcCrdMask */
- u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (p_params->pf_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (p_params->vport_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (p_params->pf_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (p_params->vport_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask = 0;
+ /* Init AFullOprtnstcCrdMask */
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
+ p_params->global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask,
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
@@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
/* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
- /* Enable/disable VPORT RL */
- qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ /* Enable/disable global RL */
+ qed_enable_global_rl(p_hwfn, p_params->global_rl_en);
/* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
@@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+ qed_global_rl_rt_init(p_hwfn);
+
return 0;
}
@@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
- u8 tc, i;
+ u16 i;
+ u8 tc;
+
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
@@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- /* Set VPORT WFQ */
+ /* Init VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
- /* Set VPORT RL */
- if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, p_params->link_speed,
- vport_params))
- return -1;
-
return 0;
}
@@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
u16 vport_pq_id;
u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
+ /* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
@@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed)
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
- u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u32 inc_val;
- if (vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
+ DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
- qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
}
@@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
-
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
do { \
typeof(var) *__p_var = &(var); \
@@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * @brief qed_dmae_to_grc - internal helper that writes from host memory
+ * to wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - PTT window used for writing the registers.
+ * @param p_data - pointer to source data.
+ * @param addr - destination register address.
+ * @param len_in_dwords - data length in dwords (u32)
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = {};
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+	/* If the write using DMAE failed, write using GRC instead */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
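A usage sketch mirroring the GFT call sites further below (same arguments; nothing is assumed beyond them):

/* Zero one GFT profile RAM line (a regpair, i.e. two 32-bit regs),
 * falling back to per-dword GRC writes if DMAE is unavailable.
 */
struct regpair ram_line = { };

qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
		PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
		sizeof(ram_line) / REG_SIZE);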
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
+ struct regpair ram_line = { };
+
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- qed_wr(p_hwfn,
- p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- 0);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
@@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ram_line_lo);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- ram_line_hi);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+ ram_line.lo = 0xffffffff;
+ ram_line.hi = 0x3ff;
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
@@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
}
}
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+
+ default:
+ return 0;
+ }
+}
+
+struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const
+ fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
+{
+ u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
+ struct phys_mem_desc *allocated_mem;
+
+ if (!buf_size)
+ return NULL;
+
+ allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!allocated_mem)
+ return NULL;
+
+ memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
+
+ /* For each Storm, set physical address in RAM */
+ while (buf_offset < buf_size) {
+ struct phys_mem_desc *storm_mem_desc;
+ struct fw_overlay_buf_hdr *hdr;
+ u32 storm_buf_size;
+ u8 storm_id;
+
+ hdr =
+ (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data,
+ FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ storm_mem_desc = allocated_mem + storm_id;
+ storm_mem_desc->size = storm_buf_size * sizeof(u32);
+
+ /* Allocate physical memory for Storm's overlays buffer */
+ storm_mem_desc->virt_addr =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ &storm_mem_desc->phys_addr, GFP_KERNEL);
+ if (!storm_mem_desc->virt_addr)
+ break;
+
+ /* Skip overlays buffer header */
+ buf_offset += OVERLAY_HDR_SIZE_DWORDS;
+
+ /* Copy Storm's overlays buffer to allocated memory */
+ memcpy(storm_mem_desc->virt_addr,
+ &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);
+
+ /* Advance to next Storm */
+ buf_offset += storm_buf_size;
+ }
+
+ /* If memory allocation has failed, free all allocated memory */
+ if (buf_offset < buf_size) {
+ qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ return NULL;
+ }
+
+ return allocated_mem;
+}
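The input buffer layout the loop above walks (a sketch; the exact FW_OVERLAY_BUF_HDR_* field widths live in the firmware headers, not in this hunk):

/* [header: OVERLAY_HDR_SIZE_DWORDS dwords, carrying storm_id and
 *  buf_size (in dwords)][buf_size dwords of that Storm's payload]
 * ...repeated per Storm until the input buffer is consumed.
 */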
+
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ u32 ram_addr, i;
+
+ /* Skip Storms with no FW overlays */
+ if (!storm_mem_desc->virt_addr)
+ continue;
+
+ /* Calculate overlay RAM GRC address of current PF */
+ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
+ sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
+
+ /* Write Storm's overlay physical address to RAM */
+ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, ram_addr,
+ ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ }
+}
+
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ if (!fw_overlay_mem)
+ return;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+
+ /* Free Storm's physical memory */
+ if (storm_mem_desc->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr);
+ }
+
+ /* Free allocated virtual memory */
+ kfree(fw_overlay_mem);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index a868d7f88601..5a6e4ac4fef4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
- 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
- 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
- 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
- 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
- 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
- 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
- 0,
- 0,
- 0,
+ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
-/* Runtime configuration helpers */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
-{
- int i;
-
- for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data.b_valid[i] = false;
-}
-
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- u16 i, segment;
+ u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ p_valid[i] = false;
continue;
}
@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* invalidate after writing */
+ for (j = i; j < i + segment; j++)
+ p_valid[j] = false;
+
/* Jump over the entire segment, including invalid entry */
i += segment;
}
@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfb->temp_data,
* 4. fill_count
*/
- params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
+ bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
- bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
- b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
break;
}
@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index 555dd086796d..e9e8ade50ed3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -81,14 +81,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_clear_rt_data - Clears the runtime init array.
- *
- *
- * @param p_hwfn
- */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
-
-/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 5585c18053ec..7245a615517a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- SET_FIELD(p_init->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
- p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
-
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
- p_params->ll2_ooo_queue_id;
+ p_init->ll2_rx_queue_id =
+ p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
- SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
- p_conn->layer_code);
-
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];
@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_empty;
- p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
- SET_FIELD(p_ramrod->flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
-
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_destroy;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
-
rc = qed_spq_post(p_hwfn, p_ent, NULL);
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 65ec16a31658..d2fe61a5cf56 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
- RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
+ /* SYN will use ctx based queues */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
+ /* OOO/unaligned will use legacy ll2 queues (ram based) */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;
n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 19a1a58d60f8..037e5978787e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.core_rx_queue_start;
-
+ memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
@@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+ p_ramrod->zero_prod_flg = 1;
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
return 0;
}
+static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_acquire_data *data,
+ u8 *start_idx, u8 *last_idx)
+{
+	/* LL2 queue handles will be split as follows:
+ * First will be the legacy queues, and then the ctx based.
+ */
+ if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ *start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
+ } else {
+ /* QED_LL2_RX_TYPE_CTX */
+ *start_idx = QED_LL2_CTX_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
+ }
+}
+
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
@@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
- u8 i, *p_tx_max;
+ u8 i, first_idx, last_idx, *p_tx_max;
int rc;
if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
+ _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
+
/* Find a free connection to be used */
- for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
@@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
+ int rc;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
- return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ if (rc)
+ return rc;
+
+ if (p_ll2_conn->rx_queue.ctx_based) {
+ rc = qed_db_recovery_add(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ }
+
+ return rc;
}
static void
@@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
+static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
+ u8 handle,
+ u8 ll2_queue_type)
+{
+ u8 qid;
+
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
+
+ /* QED_LL2_RX_TYPE_CTX
+ * FW distinguishes between the legacy queues (ram based) and the
+ * ctx based queues by the queue_id.
+ * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
+	 * and the queue ids above that are ctx based.
+ */
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
+ MAX_NUM_LL2_RX_RAM_QUEUES;
+
+	/* See the comment in _qed_ll2_calc_allowed_conns() for how the
+	 * ll2 queue handles are divided.
+ */
+ qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
+
+ return qid;
+}
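A worked instance of the mapping above, assuming the per-PF split of three legacy handles and one ctx handle defined in qed_ll2.h:

/* legacy handle 1 -> resc_start[QED_LL2_RAM_QUEUE] + 1
 * ctx handle 3    -> resc_start[QED_LL2_CTX_QUEUE] +
 *                    MAX_NUM_LL2_RX_RAM_QUEUES + (3 - 3)
 */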
+
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct qed_hwfn *p_hwfn = cxt;
- struct qed_ll2_info *p_ll2_conn;
+ struct e4_core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
@@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
+ cxt_info.iid = p_ll2_conn->cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_ll2_conn->cid);
+ goto out;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ memset(p_cxt, 0, sizeof(*p_cxt));
- qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
+ p_ll2_conn->input.rx_conn_type);
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = qid;
- p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_LL2_RX_PRODS_OFFSET(qid);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
+ p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+
+ if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ p_rx->set_prod_addr = p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ } else {
+ /* QED_LL2_RX_TYPE_CTX - using doorbell */
+ p_rx->ctx_based = 1;
+
+ p_rx->set_prod_addr = p_hwfn->doorbells +
+ p_hwfn->dpi_start_offset +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
+
+ /* prepare db data */
+ p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
+ }
+
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
@@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
goto out;
@@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_packet *p_curp)
{
struct qed_ll2_rx_packet *p_posting_packet = NULL;
- struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;
@@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
- rx_prod.bd_prod = cpu_to_le16(bd_prod);
- rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ if (p_rx->ctx_based) {
+ /* update producer by giving a doorbell */
+ p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
+ p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
+ DIRECT_REG_WR64(p_rx->set_prod_addr,
+ *((u64 *)&p_rx->db_data));
+ } else {
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
- /* Make sure chain element is updated before ringing the doorbell */
- dma_wmb();
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
- DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ }
}
int qed_ll2_post_rx_buffer(void *cxt,
@@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+
+ if (p_ll2_conn->rx_queue.ctx_based)
+ qed_db_recovery_del(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data);
+
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 5f01fbd3c073..288642d526b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -46,6 +46,18 @@
#include "qed_sp.h"
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+/* LL2 queue handles will be split as follows:
+ * first will be legacy queues, and then the ctx based queues.
+ */
+#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
+#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)
+
+#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
+ (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
+
+#define QED_LL2_LEGACY_CONN_BASE_PF 0
+#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
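With the defaults above, the per-PF handle ranges work out as follows (arithmetic only, no new defines assumed):

/* Legacy handles: 0..2 (QED_LL2_LEGACY_CONN_BASE_PF onward)
 * Ctx handle:     3    (QED_LL2_CTX_CONN_BASE_PF, one per PF)
 */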
+
struct qed_ll2_rx_packet {
struct list_head list_entry;
@@ -79,6 +91,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
+ u8 ctx_based;
bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
@@ -86,6 +99,7 @@ struct qed_ll2_rx_queue {
struct list_head posting_descq;
struct qed_ll2_rx_packet *descq_array;
void __iomem *set_prod_addr;
+ struct core_pwm_prod_update_data db_data;
};
struct qed_ll2_tx_queue {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 38f7f40b3a4d..2c189c637cca 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;
- rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
+ rc = qed_dbg_grc_config(hwfn, cfg_id, val);
qed_ptt_release(hwfn, ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 36ddb89856a8..280527cc0578 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
+#define GRCBASE_MCP 0xe00000
+
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
+ case QED_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
+ break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
break;
@@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
- case QED_LL2_QUEUE:
+ case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
+ case QED_LL2_CTX_QUEUE:
+ mfw_res_id = RESOURCE_LL2_CQS_E;
+ break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 60f850c3bdd6..3dcb6ff58e73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -178,6 +178,8 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
@@ -212,6 +214,8 @@
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
+#define DBG_REG_TIMESTAMP_VALID_EN \
+ 0x010b58UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
@@ -350,6 +354,10 @@
0x24000cUL
#define PSWRQ2_REG_ILT_MEMORY \
0x260000UL
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
+ 15200
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
+ 22000
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -1453,6 +1461,8 @@
0x1401404UL
#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
+#define XSEM_REG_DBG_GPRE_VECT \
+ 0x1401410UL
#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
@@ -1465,6 +1475,8 @@
0x1501404UL
#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
+#define YSEM_REG_DBG_GPRE_VECT \
+ 0x1501410UL
#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
@@ -1479,6 +1491,8 @@
0x1601404UL
#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
+#define PSEM_REG_DBG_GPRE_VECT \
+ 0x1601410UL
#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
@@ -1493,6 +1507,8 @@
0x1701404UL
#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
+#define TSEM_REG_DBG_GPRE_VECT \
+ 0x1701410UL
#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
@@ -1507,12 +1523,16 @@
0x1801404UL
#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
+#define MSEM_REG_DBG_GPRE_VECT \
+ 0x1801410UL
#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
@@ -1521,14 +1541,26 @@
0x1901404UL
#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
+#define USEM_REG_DBG_GPRE_VECT \
+ 0x1901410UL
#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
+#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
+ 0x000748UL
+#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+ 0x00074cUL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
+ 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE \
+ 0x000740UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
+ 0x000768UL
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
@@ -1583,14 +1615,20 @@
0x181530UL
#define DBG_REG_DBG_BLOCK_ON \
0x010454UL
+#define DBG_REG_FILTER_ENABLE \
+ 0x0109d0UL
#define DBG_REG_FRAMING_MODE \
0x010058UL
+#define DBG_REG_TRIGGER_ENABLE \
+ 0x01054cUL
#define SEM_FAST_REG_VFC_DATA_WR \
0x000b40UL
#define SEM_FAST_REG_VFC_ADDR \
0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD \
0x000b48UL
+#define SEM_FAST_REG_VFC_STATUS \
+ 0x000b4cUL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index e49fada85410..37e70562a964 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
- rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 96ab77ae6af5..b7b4fbbbccfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
- struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
- struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 7e0b795230b2..900bc603e30a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -331,8 +331,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
u8 page_cnt, i;
+ int rc;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -447,7 +447,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -471,7 +471,7 @@ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EOPNOTSUPP;
+ int rc;
if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
DP_INFO(p_hwfn, "Invalid priority type %d\n",
@@ -509,7 +509,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
@@ -546,7 +546,7 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index dcb5c917f373..66876af814c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
@@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
eng_vf_id = p_vf->abs_vf_id;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = eng_vf_id;
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
@@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
if (!vf_info)
return -EINVAL;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vfid = vf_info->abs_vf_id;
if (qed_dmae_host2host(p_hwfn, ptt,
@@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
- struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
+ u16 rl_id;
int rc;
vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
- p_link->speed);
+ rl_id = abs_vp_id; /* The "rl_id" is set to the "vport_id" */
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
static int
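Reviewer note: the flags conversions in this file replace the old QED_DMAE_FLAG_* bitmask constants with SET_FIELD() on the new QED_DMAE_PARAMS_* fields. Below is a standalone sketch of that pattern; the _MASK/_SHIFT values are illustrative assumptions, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field definition; the driver's real mask/shift may differ. */
#define QED_DMAE_PARAMS_DST_VF_VALID_MASK       0x1
#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT      1

/* Clear the field, then write the new value into it. */
#define SET_FIELD(value, name, flag)                                    \
        do {                                                            \
                (value) &= ~((uint64_t)name##_MASK << name##_SHIFT);    \
                (value) |= ((uint64_t)(flag) << name##_SHIFT);          \
        } while (0)

int main(void)
{
        uint32_t flags = 0;

        SET_FIELD(flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
        printf("flags = 0x%x\n", flags); /* 0x2 with the assumed shift */
        return 0;
}

Several fields can be set independently on the same flags word, which is exactly what the two SET_FIELD() calls in the qed_iov_copy_vf_msg() hunk above do.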
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 004c0bfec41d..c6c20776b474 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);
cons_buf: /* We still need to handle bd_len_list to consume buffers */
- if (likely(cqe->ext_bd_len_list[0]))
+ if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->ext_bd_len_list[0]));
+ le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
- if (unlikely(cqe->ext_bd_len_list[1])) {
+ if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
- "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index f815435cf106..4c7f7a7fc151 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -247,6 +247,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
DP_ERR(edev, "One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 986f26578d34..0fade19e00d4 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3602,7 +3602,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-static void ql3xxx_tx_timeout(struct net_device *ndev)
+static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
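Reviewer note: this is one of many hunks in this series adapting drivers to the net core change that added a txqueue index to the ndo_tx_timeout() callback. A minimal sketch of the new callback shape, using stub types rather than the real netdev structures:

#include <stdio.h>

struct net_device;      /* opaque stub for the sketch */

struct net_device_ops_stub {
        void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
};

/* The extra argument tells the handler which TX queue actually stalled. */
static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        (void)dev;
        printf("TX timeout on queue %u\n", txqueue);
}

static const struct net_device_ops_stub demo_ops = {
        .ndo_tx_timeout = demo_tx_timeout,
};

int main(void)
{
        demo_ops.ndo_tx_timeout(NULL, 0);
        return 0;
}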
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index a496390b8632..07f9067affc6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
+ cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c07438db30ba..9dd6cb36f366 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -56,7 +56,7 @@ static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
-static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
@@ -3068,7 +3068,7 @@ static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index afa10a163da1..f34ae8c75bc5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
+ cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
+ cond_resched();
}
fw_dump->clr = 1;
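Reviewer note: the cond_resched() calls added in qlcnic_83xx_init.c and in this file break up long firmware-dump and template-execution loops so they cannot monopolize the CPU on non-preemptible kernels. A rough userspace analogue of the pattern, with sched_yield() standing in for cond_resched() and a placeholder loop body:

#include <sched.h>
#include <stdint.h>

static void dump_many_entries(uint32_t nentries)
{
        uint32_t i;

        for (i = 0; i < nentries; i++) {
                /* ... process one dump entry (placeholder work) ... */
                sched_yield();  /* let other tasks run, like cond_resched() */
        }
}

int main(void)
{
        dump_many_entries(1024);
        return 0;
}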
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 98f92268cbaa..18b0c7a2d6dc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -282,25 +282,13 @@ static int emac_close(struct net_device *netdev)
}
/* Respond to a TX hang */
-static void emac_tx_timeout(struct net_device *netdev)
+static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct emac_adapter *adpt = netdev_priv(netdev);
schedule_work(&adpt->work_thread);
}
-/* IOCTL support for the interface */
-static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!netdev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
/**
* emac_update_hw_stats - read the EMAC stat registers
*
@@ -387,7 +375,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = emac_change_mtu,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_get_stats64 = emac_get_stats64,
.ndo_set_features = emac_set_features,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index baac016f3ec0..5a3b65a6eb4f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -785,7 +785,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
}
static void
-qcaspi_netdev_tx_timeout(struct net_device *dev)
+qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 0981068504fa..375a844cd27c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -248,7 +248,7 @@ out:
return NETDEV_TX_OK;
}
-static void qcauart_netdev_tx_timeout(struct net_device *dev)
+static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcauart *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 274e5b4bc4ac..f5ecc410ff85 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -410,7 +410,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(TM2TX, ioaddr + MTPR);
}
-static void r6040_tx_timeout(struct net_device *dev)
+static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
@@ -498,14 +498,6 @@ static int r6040_close(struct net_device *dev)
return 0;
}
-static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
@@ -957,7 +949,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = r6040_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
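Reviewer note: this hunk and the emac one above delete open-coded ioctl handlers in favor of the phylib helpers: emac, whose old handler also checked netif_running(), moves to phy_do_ioctl_running(), while r6040 moves to phy_do_ioctl(). A standalone sketch of the running variant's behaviour as implied by the deleted emac code (simplified stubs, not the phylib source; phylib's exact error codes may differ from the drivers' old ones):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct ifreq;
struct phy_device { int dummy; };
struct net_device { struct phy_device *phydev; bool running; };

static bool netif_running(struct net_device *dev) { return dev->running; }

static int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *rq, int cmd)
{
        (void)phydev; (void)rq; (void)cmd;
        return 0;       /* stub: the real call talks to the MII registers */
}

/* What the deleted emac_ioctl() did; the phylib helper centralizes this. */
static int do_ioctl_running_sketch(struct net_device *dev, struct ifreq *rq,
                                   int cmd)
{
        if (!netif_running(dev))
                return -EINVAL;
        if (!dev->phydev)
                return -ENODEV;
        return phy_mii_ioctl(dev->phydev, rq, cmd);
}

int main(void)
{
        struct net_device dev = { .phydev = NULL, .running = false };

        return do_ioctl_running_sketch(&dev, NULL, 0) == -EINVAL ? 0 : 1;
}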
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f910c4f67b0..60d342f82fb3 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1235,7 +1235,7 @@ static int cp_close (struct net_device *dev)
return 0;
}
-static void cp_tx_timeout(struct net_device *dev)
+static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 55d01266e615..5caeb8368eab 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -642,7 +642,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location);
static void mdio_write (struct net_device *dev, int phy_id, int location,
int val);
static void rtl8139_start_thread(struct rtl8139_private *tp);
-static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void rtl8139_init_ring (struct net_device *dev);
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
struct net_device *dev);
@@ -1700,7 +1700,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_bh(&tp->rx_lock);
}
-static void rtl8139_tx_timeout (struct net_device *dev)
+static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8139_private *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index d5304bad2372..2e1d78b106b0 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
-r8169-objs += r8169_main.o r8169_firmware.o
+r8169-objs += r8169_main.o r8169_firmware.o r8169_phy_config.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 58e0ca9093d3..9e3b35c97e63 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -204,7 +204,7 @@ static void net_rx(struct net_device *dev);
static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
static int net_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
/* A list of all installed ATP devices, for removing the driver module. */
@@ -533,7 +533,7 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad
outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
new file mode 100644
index 000000000000..22a6a057b11e
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* r8169.h: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/types.h>
+#include <linux/phy.h>
+
+enum mac_version {
+ /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_VER_35,
+ RTL_GIGA_MAC_VER_36,
+ RTL_GIGA_MAC_VER_37,
+ RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
+ RTL_GIGA_MAC_VER_42,
+ RTL_GIGA_MAC_VER_43,
+ RTL_GIGA_MAC_VER_44,
+ RTL_GIGA_MAC_VER_45,
+ RTL_GIGA_MAC_VER_46,
+ RTL_GIGA_MAC_VER_47,
+ RTL_GIGA_MAC_VER_48,
+ RTL_GIGA_MAC_VER_49,
+ RTL_GIGA_MAC_VER_50,
+ RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
+ RTL_GIGA_MAC_VER_60,
+ RTL_GIGA_MAC_VER_61,
+ RTL_GIGA_MAC_NONE
+};
+
+struct rtl8169_private;
+
+void r8169_apply_firmware(struct rtl8169_private *tp);
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr);
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver);
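Reviewer note: the new header exports the mac_version enum plus the four symbols above so that r8169_main.c and the new r8169_phy_config.o (see the Makefile hunk earlier) can share them. The rtl_hw_phy_config() table deleted further down shows the dispatch this split preserves: one config routine per MAC version, several versions sharing a routine. A reduced sketch of that shape, with hypothetical stand-in names:

#include <stddef.h>

enum mac_version_sketch { VER_02, VER_03, VER_NONE };

struct tp_sketch;               /* stands in for struct rtl8169_private */
struct phydev_sketch;           /* stands in for struct phy_device */

typedef void (*phy_cfg_fn)(struct tp_sketch *tp, struct phydev_sketch *phydev);

static void cfg_8169s(struct tp_sketch *tp, struct phydev_sketch *phydev)
{
        (void)tp; (void)phydev; /* per-chip register writes would go here */
}

/* Several versions may share one config routine, as in the real table. */
static const phy_cfg_fn phy_configs_sketch[] = {
        [VER_02] = cfg_8169s,
        [VER_03] = cfg_8169s,
};

static void hw_phy_config_sketch(struct tp_sketch *tp,
                                 struct phydev_sketch *phydev,
                                 enum mac_version_sketch ver)
{
        if (ver < VER_NONE && phy_configs_sketch[ver])
                phy_configs_sketch[ver](tp, phydev);
}

int main(void)
{
        hw_phy_config_sketch(NULL, NULL, VER_02);
        return 0;
}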
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 67a4d5d45e3a..aaa316be6183 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -31,6 +31,7 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include "r8169.h"
#include "r8169_firmware.h"
#define MODULENAME "r8169"
@@ -84,65 +85,6 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-enum mac_version {
- /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
- RTL_GIGA_MAC_VER_02,
- RTL_GIGA_MAC_VER_03,
- RTL_GIGA_MAC_VER_04,
- RTL_GIGA_MAC_VER_05,
- RTL_GIGA_MAC_VER_06,
- RTL_GIGA_MAC_VER_07,
- RTL_GIGA_MAC_VER_08,
- RTL_GIGA_MAC_VER_09,
- RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
- RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_15,
- RTL_GIGA_MAC_VER_16,
- RTL_GIGA_MAC_VER_17,
- RTL_GIGA_MAC_VER_18,
- RTL_GIGA_MAC_VER_19,
- RTL_GIGA_MAC_VER_20,
- RTL_GIGA_MAC_VER_21,
- RTL_GIGA_MAC_VER_22,
- RTL_GIGA_MAC_VER_23,
- RTL_GIGA_MAC_VER_24,
- RTL_GIGA_MAC_VER_25,
- RTL_GIGA_MAC_VER_26,
- RTL_GIGA_MAC_VER_27,
- RTL_GIGA_MAC_VER_28,
- RTL_GIGA_MAC_VER_29,
- RTL_GIGA_MAC_VER_30,
- RTL_GIGA_MAC_VER_31,
- RTL_GIGA_MAC_VER_32,
- RTL_GIGA_MAC_VER_33,
- RTL_GIGA_MAC_VER_34,
- RTL_GIGA_MAC_VER_35,
- RTL_GIGA_MAC_VER_36,
- RTL_GIGA_MAC_VER_37,
- RTL_GIGA_MAC_VER_38,
- RTL_GIGA_MAC_VER_39,
- RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
- RTL_GIGA_MAC_VER_42,
- RTL_GIGA_MAC_VER_43,
- RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
- RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
- RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
- RTL_GIGA_MAC_VER_51,
- RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_60,
- RTL_GIGA_MAC_VER_61,
- RTL_GIGA_MAC_NONE
-};
-
-#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
@@ -492,6 +434,7 @@ enum rtl_register_content {
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
+ EnAnaPLL = (1 << 14), // 8169
Normal_mode = (1 << 13), // unused
Force_half_dup = (1 << 12), // 8168 8101
Force_rxflow_en = (1 << 11), // 8168 8101
@@ -1078,52 +1021,6 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
}
}
-static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
-{
- rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
-}
-
-static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
-{
- int val;
-
- val = rtl_readphy(tp, reg_addr);
- rtl_writephy(tp, reg_addr, (val & ~m) | p);
-}
-
-static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
- int reg, u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0007);
-
- __phy_write(phydev, 0x1e, extpage);
- __phy_modify(phydev, reg, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0005);
-
- __phy_write(phydev, 0x05, parm);
- __phy_modify(phydev, 0x06, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0a43);
-
- __phy_write(phydev, 0x13, parm);
- __phy_modify(phydev, 0x14, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1372,7 +1269,7 @@ DECLARE_RTL_COND(rtl_efusear_cond)
return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}
-static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
@@ -1596,7 +1493,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > TD_MSS_MAX)
features &= ~NETIF_F_ALL_TSO;
- if (dev->mtu > JUMBO_1K &&
+ if (dev->mtu > ETH_DATA_LEN &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
@@ -2268,22 +2165,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
}
}
-struct phy_reg {
- u16 reg;
- u16 val;
-};
-
-static void __rtl_writephy_batch(struct rtl8169_private *tp,
- const struct phy_reg *regs, int len)
-{
- while (len-- > 0) {
- rtl_writephy(tp, regs->reg, regs->val);
- regs++;
- }
-}
-
-#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
-
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
@@ -2293,7 +2174,7 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
}
}
-static void rtl_apply_firmware(struct rtl8169_private *tp)
+void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
if (tp->rtl_fw)
@@ -2315,594 +2196,6 @@ static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
- r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
-}
-
-static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
-}
-
-static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
- phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
-}
-
-static void rtl8125_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168h_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
-}
-
-static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x06, 0x006e },
- { 0x08, 0x0708 },
- { 0x15, 0x4000 },
- { 0x18, 0x65c7 },
-
- { 0x1f, 0x0001 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x0000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf60 },
- { 0x01, 0x0140 },
- { 0x00, 0x0077 },
- { 0x04, 0x7800 },
- { 0x04, 0x7000 },
-
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf0f9 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xa000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf20 },
- { 0x01, 0x0140 },
- { 0x00, 0x00bb },
- { 0x04, 0xb800 },
- { 0x04, 0xb000 },
-
- { 0x03, 0xdf41 },
- { 0x02, 0xdc60 },
- { 0x01, 0x6340 },
- { 0x00, 0x007d },
- { 0x04, 0xd800 },
- { 0x04, 0xd000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x100a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
-
- { 0x1f, 0x0000 },
- { 0x0b, 0x0000 },
- { 0x00, 0x9200 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
-}
-
-static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
-{
- struct pci_dev *pdev = tp->pci_dev;
-
- if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
- (pdev->subsystem_device != 0xe000))
- return;
-
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
-}
-
-static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x14, 0xfb54 },
- { 0x18, 0xf5c7 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl8169scd_hw_phy_config_quirk(tp);
-}
-
-static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0x8480 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x18, 0x67c7 },
- { 0x04, 0x2000 },
- { 0x03, 0x002f },
- { 0x02, 0x4360 },
- { 0x01, 0x0109 },
- { 0x00, 0x3022 },
- { 0x04, 0x2800 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy(tp, 0x10, 0xf41b);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
-}
-
-static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write(tp->phydev, 0x1d, 0x0f00);
- phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
-}
-
-static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_set_bits(tp->phydev, 0x14, BIT(5));
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
- phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
-}
-
-static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1f, 0x0002 },
- { 0x00, 0x88d4 },
- { 0x01, 0x82b1 },
- { 0x03, 0x7002 },
- { 0x08, 0x9e30 },
- { 0x09, 0x01f0 },
- { 0x0a, 0x5500 },
- { 0x0c, 0x00c8 },
- { 0x1f, 0x0003 },
- { 0x12, 0xc096 },
- { 0x16, 0x000a },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x09, 0x2000 },
- { 0x09, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x0761 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x5461 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
-
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
-
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
-};
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
-static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
-{
- u16 reg_val;
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
- reg_val = rtl_readphy(tp, 0x06);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (reg_val != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
-static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- /*
- * Rx Error Issue
- * Fine Tune Switching regulator parameter
- */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
- rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
- }
-
- /* RSET couple improve */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0d, 0x0300);
- rtl_patchphy(tp, 0x0f, 0x0010);
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xbf00);
-}
-
-static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
- }
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- /* Switching regulator Slew rate */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0f, 0x0017);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xb300);
-}
-
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x10, 0x0008 },
- { 0x0d, 0x006c },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0xa4d8 },
- { 0x09, 0x281c },
- { 0x07, 0x2883 },
- { 0x0a, 0x6b35 },
- { 0x1d, 0x3da4 },
- { 0x1c, 0xeffd },
- { 0x14, 0x7f52 },
- { 0x18, 0x7fc6 },
- { 0x08, 0x0601 },
- { 0x06, 0x4063 },
- { 0x10, 0xf074 },
- { 0x1f, 0x0003 },
- { 0x13, 0x0789 },
- { 0x12, 0xf4bd },
- { 0x1a, 0x04fd },
- { 0x14, 0x84b0 },
- { 0x1f, 0x0000 },
- { 0x00, 0x9200 },
-
- { 0x1f, 0x0005 },
- { 0x01, 0x0340 },
- { 0x1f, 0x0001 },
- { 0x04, 0x4000 },
- { 0x03, 0x1d21 },
- { 0x02, 0x0c32 },
- { 0x01, 0x0200 },
- { 0x00, 0x5554 },
- { 0x04, 0x4800 },
- { 0x04, 0x4000 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
- { 0x1f, 0x0000 },
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
-static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
- r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
-}
-
-static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0001 },
- { 0x0b, 0x6c20 },
- { 0x07, 0x2872 },
- { 0x1c, 0xefff },
- { 0x1f, 0x0003 },
- { 0x14, 0x6420 },
- { 0x1f, 0x0000 },
- };
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- /* Update PFM & 10M TX idle timer */
- r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
-
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* DCO enable for 10M IDLE Power */
- r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
-
- /* For impedance matching */
- phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
- r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
- phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
const u16 w[] = {
@@ -2917,698 +2210,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
-static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Green Setting */
- r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
- r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
- r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
-
- /* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
- rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
- rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
-}
-
-static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* For 4-corner performance improve */
- r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* Improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
- /* Disable hiimpedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-}
-
-static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-}
-
-static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
- /* Disable hiimpedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- /* Modify green table for giga */
- r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
- r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
-
- /* uc same-seed solution */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
-}
-
-static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
- phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
-}
-
-static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
-{
- int ret;
-
- rtl_apply_firmware(tp);
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
- else
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
- else
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
-
- /* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
-
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Improve SWR Efficiency */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x11, 0x5655);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- u16 dout_tapbin;
- u32 data;
-
- rtl_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin^0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- /* SAR ADC performance */
- phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
-{
- u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
- struct phy_device *phydev = tp->phydev;
- u16 rlen;
- u32 data;
-
- rtl_apply_firmware(tp);
-
- /* CHIN EST parameter update */
- r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
-
- /* enable R-tune & PGA-retune function */
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+ u16 data1, data2, ioffset;
r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
- data = r8168_mac_ocp_read(tp, 0xdd02);
- ioffset_p3 = ((data & 0x80)>>7);
- ioffset_p3 <<= 3;
-
- data = r8168_mac_ocp_read(tp, 0xdd00);
- ioffset_p3 |= ((data & (0xe000))>>13);
- ioffset_p2 = ((data & (0x1e00))>>9);
- ioffset_p1 = ((data & (0x01e0))>>5);
- ioffset_p0 = ((data & 0x0010)>>4);
- ioffset_p0 <<= 3;
- ioffset_p0 |= (data & (0x07));
- data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
-
- if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
- phy_write_paged(phydev, 0x0bcf, 0x16, data);
-
- /* Modify rlen (TX LPF corner frequency) level */
- data = phy_read_paged(phydev, 0x0bcd, 0x16);
- data &= 0x000f;
- rlen = 0;
- if (data > 3)
- rlen = data - 3;
- data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- phy_write_paged(phydev, 0x0bcd, 0x17, data);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* Set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Channel estimation parameters */
- r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
- r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
- r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
- r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
- r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
- r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
- r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
- r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
- r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
- r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
- r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
- r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
- r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
- r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
- r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
- r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
- r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
- r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
-
- /* Force PWM-mode */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x12, 0x00ed);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
-
- r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
- r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
- r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
- r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
- r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
- r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
- r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
- r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
- r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
- r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
-
- r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0003 },
- { 0x08, 0x441d },
- { 0x01, 0x9100 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x11, 1 << 12);
- rtl_patchphy(tp, 0x19, 1 << 13);
- rtl_patchphy(tp, 0x10, 1 << 15);
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
- phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
- phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
-}
-
-static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before setting firmware */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(20);
-
- rtl_apply_firmware(tp);
-
- /* EEE setting */
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x10, 0x401f);
- rtl_writephy(tp, 0x19, 0x7030);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0004 },
- { 0x10, 0xc07f },
- { 0x19, 0x7030 },
- { 0x1f, 0x0000 }
- };
-
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
-}
-
-static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int i;
+ data1 = r8168_mac_ocp_read(tp, 0xdd02);
+ data2 = r8168_mac_ocp_read(tp, 0xdd00);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
- phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
- phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
- phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
- phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
- phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
- phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
- phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
- phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
-
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
-
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81B3);
- phy_write(phydev, 0x14, 0x0043);
- phy_write(phydev, 0x14, 0x00A7);
- phy_write(phydev, 0x14, 0x00D6);
- phy_write(phydev, 0x14, 0x00EC);
- phy_write(phydev, 0x14, 0x00F6);
- phy_write(phydev, 0x14, 0x00FB);
- phy_write(phydev, 0x14, 0x00FD);
- phy_write(phydev, 0x14, 0x00FF);
- phy_write(phydev, 0x14, 0x00BB);
- phy_write(phydev, 0x14, 0x0058);
- phy_write(phydev, 0x14, 0x0029);
- phy_write(phydev, 0x14, 0x0013);
- phy_write(phydev, 0x14, 0x0009);
- phy_write(phydev, 0x14, 0x0004);
- phy_write(phydev, 0x14, 0x0002);
- for (i = 0; i < 25; i++)
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x1f, 0x0000);
-
- r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
- r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
-
- rtl_apply_firmware(tp);
-
- phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
-
- r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
-
- phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
- phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
- phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl_hw_phy_config(struct net_device *dev)
-{
- static const rtl_generic_fct phy_configs[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
- [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
- [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
- [RTL_GIGA_MAC_VER_14] = NULL,
- [RTL_GIGA_MAC_VER_15] = NULL,
- [RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
- [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_31] = NULL,
- [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
- [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
- [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
- [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
- [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
- };
- struct rtl8169_private *tp = netdev_priv(dev);
+ ioffset = (data2 >> 1) & 0x7ff8;
+ ioffset |= data2 & 0x0007;
+ if (data1 & BIT(7))
+ ioffset |= BIT(15);
- if (phy_configs[tp->mac_version])
- phy_configs[tp->mac_version](tp);
+ return ioffset;
}
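
The tail of rtl8168h_2_get_adc_bias_ioffset() above stitches a 16-bit ADC bias offset together from two MAC OCP words. A minimal self-contained sketch of the packing, with a hypothetical worked value (the example inputs are illustrative, not measured hardware data):

    static u16 pack_adc_bias_ioffset(u16 data1, u16 data2)
    {
            /* data2 bits 4..15 land in result bits 3..14 (bit 3 of data2
             * is dropped); data2 bits 0..2 are kept in place.
             */
            u16 ioffset = ((data2 >> 1) & 0x7ff8) | (data2 & 0x0007);

            if (data1 & 0x0080)     /* BIT(7) of data1 supplies BIT(15) */
                    ioffset |= 0x8000;

            /* e.g. data1 = 0x0080, data2 = 0xabcd  ->  0xd5e5 */
            return ioffset;
    }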
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
@@ -3617,21 +2232,28 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl8169_init_phy(struct rtl8169_private *tp)
{
- rtl_hw_phy_config(dev);
+ r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- netif_dbg(tp, drv, dev,
- "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ /* set undocumented MAC Reg C+CR Offset 0x82h */
RTL_W8(tp, 0x82, 0x01);
}
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
+ tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
+ tp->pci_dev->subsystem_device == 0xe000)
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
+
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
+ if (rtl_supports_eee(tp))
+ rtl_enable_eee(tp);
+
genphy_soft_reset(tp->phydev);
}
@@ -3675,16 +2297,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -ENODEV;
-
- return phy_mii_ioctl(tp->phydev, ifr, cmd);
-}
-
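
The per-driver ioctl wrapper above goes away because phylib now provides an equivalent generic helper; .ndo_do_ioctl is pointed at phy_do_ioctl_running in the netdev ops hunk below. Roughly, the helper behaves like this (a sketch based on the phylib version, shown for reference only):

    int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
    {
            if (!netif_running(dev))
                    return -ENODEV;

            /* phy_do_ioctl() additionally checks dev->phydev before
             * forwarding to phy_mii_ioctl().
             */
            return phy_do_ioctl(dev, ifr, cmd);
    }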
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4710,9 +3322,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
- rtl_writephy(tp, 0x1f, 0x0c42);
- rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
if (rg_saw_cnt > 0) {
u16 sw_cnt_1ms_ini;
@@ -4887,7 +3497,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
/* firmware is for MAC only */
- rtl_apply_firmware(tp);
+ r8169_apply_firmware(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -4991,6 +3601,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5005,6 +3618,11 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
+
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5222,11 +3840,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
tp->cp_cmd |= PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- netif_dbg(tp, drv, tp->dev,
- "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14);
- }
+ tp->mac_version == RTL_GIGA_MAC_VER_03)
+ tp->cp_cmd |= EnAnaPLL;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
@@ -5435,7 +4050,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
netif_wake_queue(dev);
}
-static void rtl8169_tx_timeout(struct net_device *dev)
+static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -6240,7 +4855,7 @@ static int rtl_open(struct net_device *dev)
napi_enable(&tp->napi);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
rtl_pll_power_up(tp);
@@ -6371,7 +4986,7 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
phy_start(tp->phydev);
@@ -6542,7 +5157,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_fix_features = rtl8169_fix_features,
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
@@ -6744,7 +5359,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
{
/* Non-GBit versions don't support jumbo frames */
if (!tp->supports_gmii)
- return JUMBO_1K;
+ return 0;
switch (tp->mac_version) {
/* RTL8169 */
@@ -6825,6 +5440,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chipset, region;
int jumbo_max, rc;
+ /* Some tools for creating an initramfs don't consider softdeps, so
+ * r8169.ko may be in the initramfs while realtek.ko is not. The generic
+ * PHY driver is then used, which doesn't work with most chip versions.
+ */
+ if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
+ dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
+ return -ENOENT;
+ }
+
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
return -ENOMEM;
@@ -6966,10 +5590,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
- /* MTU range: 60 - hw-specific max */
- dev->min_mtu = ETH_ZLEN;
jumbo_max = rtl_jumbo_max(tp);
- dev->max_mtu = jumbo_max;
+ if (jumbo_max)
+ dev->max_mtu = jumbo_max;
rtl_set_irq_mask(tp);
@@ -6999,7 +5622,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
- if (jumbo_max > JUMBO_1K)
+ if (jumbo_max)
netif_info(tp, probe, dev,
"jumbo features [frames: %d bytes, tx checksumming: %s]\n",
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
new file mode 100644
index 000000000000..e367e77c773b
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -0,0 +1,1307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+#include "r8169.h"
+
+typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp,
+ struct phy_device *phydev);
+
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
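
The three helpers above implement the same indirection pattern: select a vendor page, write a parameter (or extended-page) index to one register, then read-modify-write the data register, all under the MDIO bus lock taken by phy_select_page() so no other accessor can flip the page in between. A single r8168g_phy_param() call is roughly equivalent to this non-atomic sketch built from the paged phylib accessors:

    /* Sketch only: unlike the real helper, each paged call below takes
     * and drops the bus lock separately, so the pair is not atomic.
     */
    static void r8168g_phy_param_sketch(struct phy_device *phydev,
                                        u16 parm, u16 mask, u16 val)
    {
            phy_write_paged(phydev, 0x0a43, 0x13, parm);       /* select parameter */
            phy_modify_paged(phydev, 0x0a43, 0x14, mask, val); /* RMW its value */
    }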
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void __rtl_writephy_batch(struct phy_device *phydev,
+ const struct phy_reg *regs, int len)
+{
+ phy_lock_mdio_bus(phydev);
+
+ while (len-- > 0) {
+ __phy_write(phydev, regs->reg, regs->val);
+ regs++;
+ }
+
+ phy_unlock_mdio_bus(phydev);
+}
+
+#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a))
+
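
rtl_writephy_batch() replays a static register/value script while holding the MDIO bus lock for the whole sequence, so page selects inside the table (writes to register 0x1f) stay paired with the values that follow them. A hypothetical usage sketch; the table contents are illustrative, borrowed from the 8168b init further down:

    static void example_phy_script(struct phy_device *phydev)
    {
            static const struct phy_reg example_init[] = {
                    { 0x1f, 0x0001 },       /* select page 1 */
                    { 0x10, 0xf41b },       /* vendor tuning value */
                    { 0x1f, 0x0000 },       /* back to page 0 */
            };

            rtl_writephy_batch(phydev, example_init);
    }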
+static void rtl8168f_config_eee_phy(struct phy_device *phydev)
+{
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
+}
+
+static void rtl8168g_config_eee_phy(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4));
+}
+
+static void rtl8168h_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168g_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168h_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b, 0x0000 },
+ { 0x00, 0x9200 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0002, 0x01, 0x90d0);
+}
+
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x14, 0xfb54 },
+ { 0x18, 0xf5c7 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x8480 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x18, 0x67c7 },
+ { 0x04, 0x2000 },
+ { 0x03, 0x002f },
+ { 0x02, 0x4360 },
+ { 0x01, 0x0109 },
+ { 0x00, 0x3022 },
+ { 0x04, 0x2800 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_write(phydev, 0x10, 0xf41b);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x10, 0xf41b);
+}
+
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1d, 0x0f00);
+ phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8);
+}
+
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+ phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98);
+}
+
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1f, 0x0002 },
+ { 0x00, 0x88d4 },
+ { 0x01, 0x82b1 },
+ { 0x03, 0x7002 },
+ { 0x08, 0x9e30 },
+ { 0x09, 0x01f0 },
+ { 0x0a, 0x5500 },
+ { 0x0c, 0x00c8 },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xc096 },
+ { 0x16, 0x000a },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x09, 0x2000 },
+ { 0x09, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x0761 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x5461 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+};
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+};
+
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
+ struct phy_device *phydev,
+ u16 val)
+{
+ u16 reg_val;
+
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x001b);
+ reg_val = phy_read(phydev, 0x06);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ phydev_warn(phydev, "chipset not ready for firmware\n");
+ else
+ r8169_apply_firmware(tp);
+}
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x0b, 0x00ef, 0x0010);
+ phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
+ }
+
+ /* RSET couple improve */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_set_bits(phydev, 0x0d, 0x0300);
+ phy_set_bits(phydev, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00);
+}
+
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
+ }
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Switching regulator Slew rate */
+ phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
+}
+
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+ r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+ };
+
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* DCO enable for 10M IDLE Power */
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
+
+ /* For impedance matching */
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
+
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
+}
+
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
+
+ /* For 4-corner performance improve */
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x8b80);
+ phy_set_bits(phydev, 0x17, 0x0006);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* Improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ rtl8168f_config_eee_phy(phydev);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_set_bits(phydev, 0x19, BIT(0));
+ phy_set_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8));
+}
+
+static void rtl8168f_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* For 4-corner performance improve */
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* Improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ rtl8168f_config_eee_phy(phydev);
+}
+
+static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+}
+
+static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+}
+
+static void rtl8411_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ /* Modify green table for giga */
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
+
+ /* uc same-seed solution */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_clear_bits(phydev, 0x19, BIT(0));
+ phy_clear_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168g_disable_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0);
+}
+
+static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
+}
+
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ r8169_apply_firmware(tp);
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x10);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0);
+ else
+ phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15));
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x13);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1));
+ else
+ phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0);
+
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Improve SWR Efficiency */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x11, 0x5655);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 dout_tapbin;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+ /* CHN EST parameters adjust - giga master */
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
+
+ /* CHN EST parameters adjust - giga slave */
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
+
+ /* enable R-tune & PGA-retune function */
+ dout_tapbin = 0;
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
+ data &= 3;
+ data <<= 2;
+ dout_tapbin |= data;
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
+ data &= 0xc000;
+ data >>= 14;
+ dout_tapbin |= data;
+ dout_tapbin = ~(dout_tapbin ^ 0x08);
+ dout_tapbin <<= 12;
+ dout_tapbin &= 0xf000;
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ /* SAR ADC performance */
+ phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
+
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
+
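
The tap-bin arithmetic in rtl8168h_1_hw_phy_config() above reduces to a simple transform: the 4-bit tap value assembled from the two status registers is XORed with 0x7 and placed in the top nibble written to parameters 0x827a..0x827d. A sketch with that reading made explicit (my reduction of the bit math, not driver code):

    static u16 dout_tapbin_field(u16 reg_13, u16 reg_12)
    {
            /* bits 0..1 of reg 0x13 -> bits 2..3; bits 14..15 of reg 0x12 -> bits 0..1 */
            u16 tap = ((reg_13 & 3) << 2) | ((reg_12 & 0xc000) >> 14);

            /* (~(tap ^ 0x08) << 12) & 0xf000  ==  (tap ^ 0x7) << 12 */
            return (u16)((~(tap ^ 0x08) & 0xf) << 12);
    }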
+static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 ioffset, rlen;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+ /* CHN EST parameter update */
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
+
+ /* enable R-tune & PGA-retune function */
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ ioffset = rtl8168h_2_get_adc_bias_ioffset(tp);
+ if (ioffset != 0xffff)
+ phy_write_paged(phydev, 0x0bcf, 0x16, ioffset);
+
+ /* Modify rlen (TX LPF corner frequency) level */
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
+ data &= 0x000f;
+ rlen = 0;
+ if (data > 3)
+ rlen = data - 3;
+ data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
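
The rlen block above clamps the low nibble of the reading (anything up to 3 maps to level 0) and replicates the resulting level into all four nibbles of the value written back to register 0x17. As a standalone sketch:

    static u16 rlen_pattern(u16 reg_16)
    {
            u16 data = reg_16 & 0x000f;
            u16 rlen = data > 3 ? data - 3 : 0;

            return rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
    }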
+static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* Set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Channel estimation parameters */
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
+
+ /* Force PWM-mode */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x12, 0x00ed);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
+
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0003 },
+ { 0x08, 0x441d },
+ { 0x01, 0x9100 },
+ { 0x1f, 0x0000 }
+ };
+
+ phy_set_bits(phydev, 0x11, BIT(12));
+ phy_set_bits(phydev, 0x19, BIT(13));
+ phy_set_bits(phydev, 0x10, BIT(15));
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ phy_write_paged(phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(phydev, 0x0001, 0x15, 0x7701);
+}
+
+static void rtl8402_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before setting firmware */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(20);
+
+ r8169_apply_firmware(tp);
+
+ /* EEE setting */
+ phy_write(phydev, 0x1f, 0x0004);
+ phy_write(phydev, 0x10, 0x401f);
+ phy_write(phydev, 0x19, 0x7030);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
+ phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
+ phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
+ phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
+
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
+
+ phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
+
+ phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
+ phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
+ phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int i;
+
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
+ phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
+ phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
+ phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
+ phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
+ phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x80a2);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x16, 0x809c);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81B3);
+ phy_write(phydev, 0x14, 0x0043);
+ phy_write(phydev, 0x14, 0x00A7);
+ phy_write(phydev, 0x14, 0x00D6);
+ phy_write(phydev, 0x14, 0x00EC);
+ phy_write(phydev, 0x14, 0x00F6);
+ phy_write(phydev, 0x14, 0x00FB);
+ phy_write(phydev, 0x14, 0x00FD);
+ phy_write(phydev, 0x14, 0x00FF);
+ phy_write(phydev, 0x14, 0x00BB);
+ phy_write(phydev, 0x14, 0x0058);
+ phy_write(phydev, 0x14, 0x0029);
+ phy_write(phydev, 0x14, 0x0013);
+ phy_write(phydev, 0x14, 0x0009);
+ phy_write(phydev, 0x14, 0x0004);
+ phy_write(phydev, 0x14, 0x0002);
+ for (i = 0; i < 25; i++)
+ phy_write(phydev, 0x14, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
+ r8169_apply_firmware(tp);
+
+ phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
+
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
+
+ phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
+ phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
+
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver)
+{
+ static const rtl_phy_cfg_fct phy_configs[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+ [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_31] = NULL,
+ [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+ [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+ [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_41] = NULL,
+ [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
+ [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
+ };
+
+ if (phy_configs[ver])
+ phy_configs[ver](tp, phydev);
+}
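
With this table-driven dispatcher exported from the new file, the core driver keeps no per-version PHY knowledge of its own; the rtl8169_init_phy() hunk in r8169_main.c above reduces PHY setup to a single call:

    /* from rtl8169_init_phy() in r8169_main.c (see the hunk above) */
    r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);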
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4b13a184bfc7..067ad25553b9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1425,7 +1425,7 @@ out_napi_off:
}
/* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ravb_private *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e19b49c4013e..58ca126518a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tsu) {
add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
- add_tsu_reg(TSU_FWEN0);
- add_tsu_reg(TSU_FWEN1);
- add_tsu_reg(TSU_FCM);
- add_tsu_reg(TSU_BSYSL0);
- add_tsu_reg(TSU_BSYSL1);
- add_tsu_reg(TSU_PRISL0);
- add_tsu_reg(TSU_PRISL1);
- add_tsu_reg(TSU_FWSL0);
- add_tsu_reg(TSU_FWSL1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_FWEN0);
+ add_tsu_reg(TSU_FWEN1);
+ add_tsu_reg(TSU_FCM);
+ add_tsu_reg(TSU_BSYSL0);
+ add_tsu_reg(TSU_BSYSL1);
+ add_tsu_reg(TSU_PRISL0);
+ add_tsu_reg(TSU_PRISL1);
+ add_tsu_reg(TSU_FWSL0);
+ add_tsu_reg(TSU_FWSL1);
+ }
add_tsu_reg(TSU_FWSLC);
- add_tsu_reg(TSU_QTAGM0);
- add_tsu_reg(TSU_QTAGM1);
- add_tsu_reg(TSU_FWSR);
- add_tsu_reg(TSU_FWINMK);
- add_tsu_reg(TSU_ADQT0);
- add_tsu_reg(TSU_ADQT1);
- add_tsu_reg(TSU_VTAG0);
- add_tsu_reg(TSU_VTAG1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_QTAGM0);
+ add_tsu_reg(TSU_QTAGM1);
+ add_tsu_reg(TSU_FWSR);
+ add_tsu_reg(TSU_FWINMK);
+ add_tsu_reg(TSU_ADQT0);
+ add_tsu_reg(TSU_ADQT1);
+ add_tsu_reg(TSU_VTAG0);
+ add_tsu_reg(TSU_VTAG1);
+ }
add_tsu_reg(TSU_ADSBSY);
add_tsu_reg(TSU_TEN);
add_tsu_reg(TSU_POST1);
@@ -2478,7 +2482,7 @@ out_napi_off:
}
/* Timeout function */
-static void sh_eth_tx_timeout(struct net_device *ndev)
+static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
@@ -2643,20 +2647,6 @@ static int sh_eth_close(struct net_device *ndev)
return 0;
}
-/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
if (netif_running(ndev))
@@ -3155,7 +3145,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3171,7 +3161,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bc4f951315da..7585cd2270ba 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2159,7 +2159,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
if (err)
rocker_world_fib4_abort(rocker);
@@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
fib_work->event = event;
switch (event) {
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_DEL:
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 52ed111d98f4..c705743d69f7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1572,7 +1572,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void sxgbe_tx_timeout(struct net_device *dev)
+static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -1939,9 +1939,7 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
- return -EINVAL;
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+ ret = phy_do_ioctl(dev, rq, cmd);
break;
default:
break;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 632a7c85964d..128ee7cda1ed 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -79,7 +79,7 @@ static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
-static void ether3_timeout(struct net_device *dev);
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue);
#define BUS_16 2
#define BUS_8 1
@@ -450,7 +450,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
}
-static void ether3_timeout(struct net_device *dev)
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 276c7cae7cee..8507ff242014 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -645,7 +645,7 @@ sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void timeout(struct net_device *dev)
+static void timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5f36774bf4b8..ea5a9220196c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -21,8 +21,6 @@ config SFC
depends on PCI
select MDIO
select CRC32
- select I2C
- select I2C_ALGOBIT
imply PTP_1588_CLOCK
---help---
This driver supports 10/40-gigabit Ethernet cards based on
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index c5c297e78d06..87d093da22ca 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
- selftest.o ethtool.o ptp.o tx_tso.o \
- mcdi.o mcdi_port.o mcdi_mon.o
+sfc-y += efx.o efx_common.o efx_channels.o nic.o \
+ farch.o siena.o ef10.o \
+ tx.o tx_common.o tx_tso.o rx.o rx_common.o \
+ selftest.o ethtool.o ethtool_common.o ptp.o \
+ mcdi.o mcdi_port.o mcdi_port_common.o \
+ mcdi_functions.o mcdi_filters.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4d9bbccc6f89..52113b7529d6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -5,11 +5,15 @@
*/
#include "net_driver.h"
+#include "rx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
#include "nic.h"
+#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
@@ -25,28 +29,6 @@ enum {
EFX_EF10_TEST = 1,
EFX_EF10_REFILL,
};
-/* The maximum size of a shared RSS context */
-/* TODO: this should really be from the mcdi protocol export */
-#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
-
-/* The filter table(s) are managed by firmware and we have write-only
- * access. When removing filters we must identify them to the
- * firmware by a 64-bit handle, but this is too wide for Linux kernel
- * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
- * be able to tell in advance whether a requested insertion will
- * replace an existing filter. Therefore we maintain a software hash
- * table, which should be at least as large as the hardware hash
- * table.
- *
- * Huntington has a single 8K filter table shared between all filter
- * types and both ports.
- */
-#define HUNT_FILTER_TBL_ROWS 8192
-
-#define EFX_EF10_FILTER_ID_INVALID 0xffff
-
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
/* VLAN list entry */
struct efx_ef10_vlan {
@@ -54,95 +36,8 @@ struct efx_ef10_vlan {
u16 vid;
};
-enum efx_ef10_default_filters {
- EFX_EF10_BCAST,
- EFX_EF10_UCDEF,
- EFX_EF10_MCDEF,
- EFX_EF10_VXLAN4_UCDEF,
- EFX_EF10_VXLAN4_MCDEF,
- EFX_EF10_VXLAN6_UCDEF,
- EFX_EF10_VXLAN6_MCDEF,
- EFX_EF10_NVGRE4_UCDEF,
- EFX_EF10_NVGRE4_MCDEF,
- EFX_EF10_NVGRE6_UCDEF,
- EFX_EF10_NVGRE6_MCDEF,
- EFX_EF10_GENEVE4_UCDEF,
- EFX_EF10_GENEVE4_MCDEF,
- EFX_EF10_GENEVE6_UCDEF,
- EFX_EF10_GENEVE6_MCDEF,
-
- EFX_EF10_NUM_DEFAULT_FILTERS
-};
-
-/* Per-VLAN filters information */
-struct efx_ef10_filter_vlan {
- struct list_head list;
- u16 vid;
- u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
- u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
- u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
-};
-
-struct efx_ef10_dev_addr {
- u8 addr[ETH_ALEN];
-};
-
-struct efx_ef10_filter_table {
-/* The MCDI match masks supported by this fw & hw, in order of priority */
- u32 rx_match_mcdi_flags[
- MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
- unsigned int rx_match_count;
-
- struct rw_semaphore lock; /* Protects entries */
- struct {
- unsigned long spec; /* pointer to spec plus flag bits */
-/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
-/* unused flag 1UL */
-#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
-#define EFX_EF10_FILTER_FLAGS 3UL
- u64 handle; /* firmware handle */
- } *entry;
-/* Shadow of net_device address lists, guarded by mac_lock */
- struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
- struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count;
- int dev_mc_count;
- bool uc_promisc;
- bool mc_promisc;
-/* Whether in multicast promiscuous mode when last changed */
- bool mc_promisc_last;
- bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
- bool vlan_filter;
- struct list_head vlan_list;
-};
-
-/* An arbitrary search limit for the software hash table */
-#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
-static void efx_ef10_filter_table_remove(struct efx_nic *efx);
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan);
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
-static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
-{
- WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
- return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
-}
-
-static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
-{
- return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
-}
-
-static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
-{
- return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
-}
-
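
The three helpers deleted above define the driver's "unsafe" filter ID: a match-priority index packed above a 13-bit table row (the factor of two appears to reserve a spare bit above the row index). A self-contained userspace sketch using the removed constants, for illustration only:

#include <stdio.h>

#define HUNT_FILTER_TBL_ROWS	8192	/* 8K rows, as in the removed code */

static unsigned int make_filter_id(unsigned int pri, unsigned int idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}

static unsigned int filter_id_to_idx(unsigned int id)
{
	return id & (HUNT_FILTER_TBL_ROWS - 1);	/* low 13 bits */
}

static unsigned int filter_id_to_pri(unsigned int id)
{
	return id / (HUNT_FILTER_TBL_ROWS * 2);
}

int main(void)
{
	unsigned int id = make_filter_id(3, 42);

	printf("id=%u -> pri=%u idx=%u\n", id,
	       filter_id_to_pri(id), filter_id_to_idx(id));
	return 0;	/* prints: id=49194 -> pri=3 idx=42 */
}
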
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -185,24 +80,6 @@ static bool efx_ef10_is_vf(struct efx_nic *efx)
return efx->type->is_vf;
}
-static int efx_ef10_get_pf_index(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
- sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < sizeof(outbuf))
- return -EIO;
-
- nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
- return 0;
-}
-
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
@@ -273,24 +150,9 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
u8 vi_window_mode = MCDI_BYTE(outbuf,
GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
- switch (vi_window_mode) {
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
- efx->vi_stride = 8192;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
- efx->vi_stride = 16384;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
- efx->vi_stride = 65536;
- break;
- default:
- netif_err(efx, probe, efx->net_dev,
- "Unrecognised VI window mode %d\n",
- vi_window_mode);
- return -EIO;
- }
- netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
- efx->vi_stride);
+ rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ if (rc)
+ return rc;
} else {
/* keep default VI stride */
netif_dbg(efx, probe, efx->net_dev,
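
The switch deleted in this hunk is the contract the new efx_mcdi_window_mode_to_stride() helper has to honour. A sketch of the same mapping (the strides come from the removed code; the numeric case values are an assumption about the MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_* constants):

#include <errno.h>

/* Sketch only; the real helper also logs via netif_err()/netif_dbg(). */
static int window_mode_to_stride(unsigned int vi_window_mode,
				 unsigned int *stride)
{
	switch (vi_window_mode) {
	case 0:			/* assumed ..._VI_WINDOW_MODE_8K */
		*stride = 8192;
		break;
	case 1:			/* assumed ..._VI_WINDOW_MODE_16K */
		*stride = 16384;
		break;
	case 2:			/* assumed ..._VI_WINDOW_MODE_64K */
		*stride = 65536;
		break;
	default:
		return -EIO;	/* unrecognised mode, as before */
	}
	return 0;
}
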
@@ -576,7 +438,7 @@ static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
if (efx->filter_state) {
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (rc)
@@ -605,7 +467,7 @@ static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
if (efx->filter_state) {
down_write(&efx->filter_sem);
- efx_ef10_filter_del_vlan(efx, vlan->vid);
+ efx_mcdi_filter_del_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
}
@@ -689,7 +551,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
}
nic_data->warm_boot_count = rc;
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
@@ -725,7 +587,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail4;
- rc = efx_ef10_get_pf_index(efx);
+ rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
goto fail5;
@@ -831,22 +693,6 @@ fail1:
return rc;
}
-static int efx_ef10_free_vis(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF_ERR(outbuf);
- size_t outlen;
- int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
-
- /* -EALREADY means nothing to free, so ignore */
- if (rc == -EALREADY)
- rc = 0;
- if (rc)
- efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
- rc);
- return rc;
-}
-
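
Worth noting from the deleted efx_ef10_free_vis(): -EALREADY from MC_CMD_FREE_VIS means there was nothing to free and is deliberately swallowed, while any other failure is reported through efx_mcdi_display_error(). The efx_mcdi_free_vis() replacement presumably keeps this behaviour, since the callers in the remove and dimension_resources hunks below still treat a non-zero return as an error.
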
#ifdef EFX_USE_PIO
static void efx_ef10_free_piobufs(struct efx_nic *efx)
@@ -1084,12 +930,12 @@ static void efx_ef10_remove(struct efx_nic *efx)
efx_mcdi_mon_remove(efx);
- efx_ef10_rx_free_indir_table(efx);
+ efx_mcdi_rx_free_indir_table(efx);
if (nic_data->wc_membase)
iounmap(nic_data->wc_membase);
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
WARN_ON(rc != 0);
if (!nic_data->must_restore_piobufs)
@@ -1260,28 +1106,10 @@ static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
- rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
- return -EIO;
- netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
- MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
-
- nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
- nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
- return 0;
+ return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
+ &nic_data->n_allocated_vis);
}
/* Note that the failure path of this function does not free
@@ -1363,7 +1191,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* In case the last attached driver failed to free VIs, do it now */
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
if (rc != 0)
return rc;
@@ -1384,7 +1212,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
efx->max_tx_channels =
nic_data->n_allocated_vis / EFX_TXQ_TYPES;
- efx_ef10_free_vis(efx);
+ efx_mcdi_free_vis(efx);
return -EAGAIN;
}
@@ -1401,7 +1229,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* Shrink the original UC mapping of the memory BAR */
- membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ membase = ioremap(efx->membase_phys, uc_mem_map_size);
if (!membase) {
netif_err(efx, probe, efx->net_dev,
"could not shrink memory BAR to %x\n",
@@ -1490,7 +1318,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
return 0;
}
-static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
@@ -1503,7 +1331,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
@@ -1571,7 +1399,7 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
*/
if ((reset_type == RESET_TYPE_ALL ||
reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
return rc;
}
@@ -2187,7 +2015,7 @@ static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
/* All our allocations have been reset */
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -2408,20 +2236,15 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_nic_data *nic_data;
bool tso_v2 = false;
- size_t inlen;
- dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
/* Only attempt to enable TX timestamping if we have the license for it,
* otherwise TXQ init will fail
@@ -2448,51 +2271,9 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
channel->channel);
}
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = tx_queue->txd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
- tx_queue->queue, entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
-
- do {
- MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
- /* This flag was removed from mcdi_pcol.h for
- * the non-_EXT version of INIT_TXQ. However,
- * firmware still honours it.
- */
- INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
- INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
- tx_queue->timestamping);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc == -ENOSPC && tso_v2) {
- /* Retry without TSOv2 if we're short on contexts. */
- tso_v2 = false;
- netif_warn(efx, probe, efx->net_dev,
- "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
- } else if (rc) {
- efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
- MC_CMD_INIT_TXQ_EXT_IN_LEN,
- NULL, 0, rc);
- goto fail;
- }
- } while (rc);
+ rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+ if (rc)
+ goto fail;
/* A previous user of this TX queue might have set us up the
* bomb by writing a descriptor to the TX push collector but
@@ -2530,35 +2311,6 @@ fail:
tx_queue->queue);
}
-static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = tx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
- tx_queue->queue);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
-{
- efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
-}
-
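
Taken together, the TX hunks collapse the queue lifecycle into new MCDI helpers: efx_mcdi_tx_init() absorbs the INIT_TXQ descriptor programming, including the retry-without-TSOv2 fallback on -ENOSPC, while the efx_ef10_tx_fini()/efx_ef10_tx_remove() pair deleted here (FINI_TXQ with -EALREADY tolerated, then freeing the descriptor buffer) disappears behind common teardown code. A compressed, self-contained sketch of the retry pattern the helper inherits, assuming it keeps the removed loop's shape:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-in for MC_CMD_INIT_TXQ: pretend the firmware is
 * out of TSOv2 contexts so the fallback path is exercised.
 */
static int init_txq(bool tso_v2)
{
	return tso_v2 ? -ENOSPC : 0;
}

static int init_txq_with_tso_fallback(bool want_tso_v2)
{
	bool tso_v2 = want_tso_v2;
	int rc;

	do {
		rc = init_txq(tso_v2);
		if (rc == -ENOSPC && tso_v2)
			tso_v2 = false;	/* retry without TSOv2 */
		else if (rc)
			return rc;	/* hard failure */
	} while (rc);

	return 0;
}
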
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
@@ -2637,527 +2389,6 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
- 1 << RSS_MODE_HASH_DST_ADDR_LBN)
-#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
- 1 << RSS_MODE_HASH_DST_PORT_LBN)
-#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-
-static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
-{
- /* Firmware had a bug (sfc bug 61952) where it would not actually
- * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
- * This meant that it would always contain whatever was previously
- * in the MCDI buffer. Fortunately, all firmware versions with
- * this bug have the same default flags value for a newly-allocated
- * RSS context, and the only time we want to get the flags is just
- * after allocating. Moreover, the response has a 32-bit hole
- * where the context ID would be in the request, so we can use an
- * overlength buffer in the request and pre-fill the flags field
- * with what we believe the default to be. Thus if the firmware
- * has the bug, it will leave our pre-filled value in the flags
- * field of the response, and we will get the right answer.
- *
- * However, this does mean that this function should NOT be used if
- * the RSS context flags might not be their defaults - it is ONLY
- * reliably correct for a newly-allocated RSS context.
- */
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- size_t outlen;
- int rc;
-
- /* Check we have a hole for the context ID */
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
- RSS_CONTEXT_FLAGS_DEFAULT);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
- sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
- if (rc == 0) {
- if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
- rc = -EIO;
- else
- *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
- }
- return rc;
-}
-
-/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
- * If we fail, we just leave the RSS context at its default hash settings,
- * which is safe but may slightly reduce performance.
- * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
- * just need to set the UDP ports flags (for both IP versions).
- */
-static void efx_ef10_set_rss_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
- u32 flags;
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
-
- if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
- return;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
- if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
- NULL, 0, NULL))
- /* Succeeded, so UDP 4-tuple is now enabled */
- ctx->rx_hash_udp_4tuple = true;
-}
-
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
- unsigned *context_size)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
- u32 alloc_type = exclusive ?
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
- unsigned rss_spread = exclusive ?
- efx->rss_spread :
- min(rounddown_pow_of_two(efx->rss_spread),
- EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
-
- if (!exclusive && rss_spread == 1) {
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- if (context_size)
- *context_size = 1;
- return 0;
- }
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
- return -EOPNOTSUPP;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
- return -EIO;
-
- ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
-
- if (context_size)
- *context_size = rss_spread;
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
- efx_ef10_set_rss_flags(efx, ctx);
-
- return 0;
-}
-
-static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
- context);
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
- const u32 *rx_indir_table, const u8 *key)
-{
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
- int i, rc;
-
- MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
-
- /* This iterates over the length of efx->rss_context.rx_indir_table, but
- * copies bytes from rx_indir_table. That's because the latter is a
- * pointer rather than an array, but should have the same length.
- * The efx->rss_context.rx_hash_key loop below is similar.
- */
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
- MCDI_PTR(tablebuf,
- RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) rx_indir_table[i];
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
- sizeof(tablebuf), NULL, 0, NULL);
- if (rc != 0)
- return rc;
-
- MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
- MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
-
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
- sizeof(keybuf), NULL, 0, NULL);
-}
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
-{
- int rc;
-
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
- WARN_ON(rc != 0);
- }
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
-}
-
-static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
- unsigned *context_size)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
-
- if (rc != 0)
- return rc;
-
- nic_data->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
- return 0;
-}
-
-static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
-
- if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
- rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
- if (rc == -EOPNOTSUPP)
- return rc;
- else if (rc != 0)
- goto fail1;
- }
-
- rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
- if (rc != 0)
- goto fail2;
-
- if (efx->rss_context.context_id != old_rx_rss_context &&
- old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
- WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
- if (rx_indir_table != efx->rss_context.rx_indir_table)
- memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key != efx->rss_context.rx_hash_key)
- memcpy(efx->rss_context.rx_hash_key, key,
- efx->type->rx_hash_key_size);
-
- return 0;
-
-fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
- }
-fail1:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
- if (rc)
- return rc;
- }
-
- if (!rx_indir_table) /* Delete this context */
- return efx_ef10_free_rss_context(efx, ctx->context_id);
-
- rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
- size_t outlen;
- int rc, i;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
- MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
- return -ENOENT;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
- tablebuf, sizeof(tablebuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
- RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
- keybuf, sizeof(keybuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
- ctx->rx_hash_key[i] = MCDI_PTR(
- keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
-{
- int rc;
-
- mutex_lock(&efx->rss_lock);
- rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
-static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_rss_context *ctx;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (!nic_data->must_restore_rss_contexts)
- return;
-
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
- /* previous NIC RSS context is gone */
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- /* so try to allocate a new one */
- rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
- if (rc)
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
- "; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
- }
- nic_data->must_restore_rss_contexts = false;
-}
-
-static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- if (efx->rss_spread == 1)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
-
- rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
-
- if (rc == -ENOBUFS && !user) {
- unsigned context_size;
- bool mismatch = false;
- size_t i;
-
- for (i = 0;
- i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
- i++)
- mismatch = rx_indir_table[i] !=
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-
- rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
- if (rc == 0) {
- if (context_size != efx->rss_spread)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one of"
- " different size."
- " Wanted %u, got %u.\n",
- efx->rss_spread, context_size);
- else if (mismatch)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one but"
- " could not apply custom"
- " indirection.\n");
- else
- netif_info(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one.\n");
- }
- }
- return rc;
-}
-
-static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table
- __attribute__ ((unused)),
- const u8 *key
- __attribute__ ((unused)))
-{
- if (user)
- return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
- return 0;
- return efx_ef10_rx_push_shared_rss_config(efx, NULL);
-}
-
-static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
-{
- return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
- (rx_queue->ptr_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t inlen;
- dma_addr_t dma_addr;
- int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
-
- rx_queue->scatter_n = 0;
- rx_queue->scatter_len = 0;
-
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1,
- INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = rx_queue->rxd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
- efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc)
- netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = rx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
-{
- efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
-}
-
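
The RX-queue probe/init/fini/remove quartet removed here mirrors the TX one above: descriptor-buffer allocation sized from ptr_mask, INIT_RXQ programming in EFX_BUF_SIZE chunks, FINI_RXQ with -EALREADY treated as already-gone, and a final buffer free. Judging by the Makefile hunk and the fini_dmaq hunk below, this lands in the shared rx_common/efx_mcdi_rx_*() helpers rather than staying EF10-specific, as do the event-queue probe/fini/remove functions deleted further down.
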
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3229,106 +2460,20 @@ efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
/* nothing to do */
}
-static int efx_ef10_ev_probe(struct efx_channel *channel)
-{
- return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
- (channel->eventq_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
static int efx_ef10_ev_init(struct efx_channel *channel)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
- EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
- size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- size_t inlen, outlen;
unsigned int enabled, implemented;
- dma_addr_t dma_addr;
+ bool use_v2, cut_thru;
int rc;
- int i;
nic_data = efx->nic_data;
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
-
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
- /* INIT_EVQ expects index in vector table, not absolute */
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
- MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
- MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
-
- if (nic_data->datapath_caps2 &
- 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
- /* Use the new generic approach to specifying event queue
- * configuration, requesting lower latency or higher throughput.
- * The options that actually get used appear in the output.
- */
- MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
- INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_V2_IN_FLAG_TYPE,
- MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
- } else {
- bool cut_thru = !(nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
-
- MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
- INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_IN_FLAG_TX_MERGE, 1,
- INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
- }
-
- dma_addr = channel->eventq.buf.dma_addr;
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
-
- if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
- netif_dbg(efx, drv, efx->net_dev,
- "Channel %d using event queue flags %08x\n",
- channel->channel,
- MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+ use_v2 = nic_data->datapath_caps2 &
+ 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
+ cut_thru = !(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+ rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
/* IRQ return is ignored */
if (channel->channel || rc)
@@ -3386,15 +2531,10 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
return 0;
fail:
- efx_ef10_ev_fini(channel);
+ efx_mcdi_ev_fini(channel);
return rc;
}
-static void efx_ef10_ev_remove(struct efx_channel *channel)
-{
- efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
-}
-
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
unsigned int rx_queue_label)
{
@@ -3976,9 +3116,9 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
if (efx->state != STATE_RECOVERY) {
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_ef10_rx_fini(rx_queue);
+ efx_mcdi_rx_fini(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_ef10_tx_fini(tx_queue);
+ efx_mcdi_tx_fini(tx_queue);
}
wait_event_timeout(efx->flush_wq,
@@ -4000,1538 +3140,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}
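
Everything removed from here to the end of this hunk is the EF10 filter-table implementation: the exclusivity test, spec-pointer/flag packing into entry->spec, FILTER_OP push and replace, insertion with linear probing, removal, iteration for ethtool, and the ARFS expiry hook. The new #include "mcdi_filters.h" at the top of the file and the mcdi_filters.o Makefile entry indicate the code is being relocated wholesale rather than rewritten.
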
-/* Decide whether a filter should be exclusive or else should allow
- * delivery to additional recipients. Currently we decide that
- * filters for specific local unicast MAC and IP addresses are
- * exclusive.
- */
-static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
-{
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
- !is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- !ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] != 0xff)
- return true;
- }
-
- return false;
-}
-
-static struct efx_filter_spec *
-efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
- ~EFX_EF10_FILTER_FLAGS);
-}
-
-static unsigned int
-efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
-}
-
-static void
-efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
- unsigned int filter_idx,
- const struct efx_filter_spec *spec,
- unsigned int flags)
-{
- table->entry[filter_idx].spec = (unsigned long)spec | flags;
-}
-
-static void
-efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- u32 match_fields = 0, uc_match, mc_match;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_INSERT :
- MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
- /* Convert match flags and values. Unlike almost
- * everything else in MCDI, these fields are in
- * network byte order.
- */
-#define COPY_VALUE(value, mcdi_field) \
- do { \
- match_fields |= \
- 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN; \
- BUILD_BUG_ON( \
- MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
- sizeof(value)); \
- memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
- &value, sizeof(value)); \
- } while (0)
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
- if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
- COPY_VALUE(spec->gen_field, mcdi_field); \
- }
- /* Handle encap filters first. They will always be mismatch
- * (unknown UC or MC) filters
- */
- if (encap_type) {
- /* ether_type and outer_ip_proto need to be variables
- * because COPY_VALUE wants to memcpy them
- */
- __be16 ether_type =
- htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
- ETH_P_IPV6 : ETH_P_IP);
- u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
- u8 outer_ip_proto;
-
- switch (encap_type & EFX_ENCAP_TYPES_MASK) {
- case EFX_ENCAP_TYPE_VXLAN:
- vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
- /* fallthrough */
- case EFX_ENCAP_TYPE_GENEVE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_UDP;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- /* We always need to set the type field, even
- * though we're not matching on the TNI.
- */
- MCDI_POPULATE_DWORD_1(inbuf,
- FILTER_OP_EXT_IN_VNI_OR_VSID,
- FILTER_OP_EXT_IN_VNI_TYPE,
- vni_type);
- break;
- case EFX_ENCAP_TYPE_NVGRE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_GRE;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- break;
- default:
- WARN_ON(1);
- }
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
- match_fields |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- COPY_FIELD(REM_HOST, rem_host, SRC_IP);
- COPY_FIELD(LOC_HOST, loc_host, DST_IP);
- COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
- COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
- COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
- COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
- COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
- COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
- COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
- COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
-#undef COPY_VALUE
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
- match_fields);
-}
-
-static void efx_ef10_filter_push_prep(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
- bool replacing)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u32 flags = spec->flags;
-
- memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
-
- /* If RSS filter, caller better have given us an RSS context */
- if (flags & EFX_FILTER_FLAG_RX_RSS) {
- /* We don't have the ability to return an error, so we'll just
- * log a warning and disable RSS for the filter.
- */
- if (WARN_ON_ONCE(!ctx))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- }
-
- if (replacing) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_REPLACE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
- } else {
- efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
- }
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
- MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
- MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- 0 : spec->dmaq_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
- (flags & EFX_FILTER_FLAG_RX_RSS) ?
- MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
- MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- if (flags & EFX_FILTER_FLAG_RX_RSS)
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
-}
-
-static int efx_ef10_filter_push(struct efx_nic *efx,
- const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
- size_t outlen;
- int rc;
-
- efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc && spec->priority != EFX_FILTER_PRI_HINT)
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
- outbuf, outlen, rc);
- if (rc == 0)
- *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
- if (rc == -ENOSPC)
- rc = -EBUSY; /* to match efx_farch_filter_insert() */
- return rc;
-}
-
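
One subtlety to keep in mind when reading the relocated code: efx_ef10_filter_push() translates the firmware's -ENOSPC into -EBUSY so a full table reports the same error as efx_farch_filter_insert() on the older architecture, and it only prints MCDI errors for priorities above EFX_FILTER_PRI_HINT, so opportunistic ARFS insertions fail quietly.
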
-static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- unsigned int match_flags = spec->match_flags;
- unsigned int uc_match, mc_match;
- u32 mcdi_flags = 0;
-
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
- unsigned int old_match_flags = match_flags; \
- match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
- if (match_flags != old_match_flags) \
- mcdi_flags |= \
- (1 << ((encap) ? \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
- mcdi_field ## _LBN : \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
- mcdi_field ## _LBN)); \
- }
- /* inner or outer based on encap type */
- MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
- /* always outer */
- MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
- MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
-#undef MAP_FILTER_TO_MCDI_FLAG
-
- /* special handling for encap type, and mismatch */
- if (encap_type) {
- match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
- mcdi_flags |=
- (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
- match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
- mcdi_flags |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- }
-
- /* Did we map them all? */
- WARN_ON_ONCE(match_flags);
-
- return mcdi_flags;
-}
-
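
The MAP_FILTER_TO_MCDI_FLAG macro above tests for a match flag by clearing it and comparing, which also consumes the flag so the WARN_ON_ONCE(match_flags) at the end can assert that every flag was translated. The same idiom in plain C (illustrative names):

#include <stdbool.h>

/* Clear `flag` out of *flags and report whether it was set; mirrors the
 * macro's clear-and-compare trick.
 */
static bool consume_flag(unsigned int *flags, unsigned int flag)
{
	unsigned int old = *flags;

	*flags &= ~flag;
	return *flags != old;
}
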
-static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
- const struct efx_filter_spec *spec)
-{
- u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- unsigned int match_pri;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++)
- if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
- return match_pri;
-
- return -EPROTONOSUPPORT;
-}
-
-static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
- unsigned int match_pri, hash;
- unsigned int priv_flags;
- bool rss_locked = false;
- bool replacing = false;
- unsigned int depth, i;
- int ins_index = -1;
- DEFINE_WAIT(wait);
- bool is_mc_recip;
- s32 rc;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- table = efx->filter_state;
- down_write(&table->lock);
-
- /* For now, only support RX filters */
- if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
- EFX_FILTER_FLAG_RX) {
- rc = -EINVAL;
- goto out_unlock;
- }
-
- rc = efx_ef10_filter_pri(table, spec);
- if (rc < 0)
- goto out_unlock;
- match_pri = rc;
-
- hash = efx_filter_spec_hash(spec);
- is_mc_recip = efx_filter_is_mc_recipient(spec);
- if (is_mc_recip)
- bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
-
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
- rss_locked = true;
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
-
- /* Find any existing filters with the same match tuple or
- * else a free slot to insert at.
- */
- for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
-
- if (!saved_spec) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_spec_equal(spec, saved_spec)) {
- if (spec->priority < saved_spec->priority &&
- spec->priority != EFX_FILTER_PRI_AUTO) {
- rc = -EPERM;
- goto out_unlock;
- }
- if (!is_mc_recip) {
- /* This is the only one */
- if (spec->priority ==
- saved_spec->priority &&
- !replace_equal) {
- rc = -EEXIST;
- goto out_unlock;
- }
- ins_index = i;
- break;
- } else if (spec->priority >
- saved_spec->priority ||
- (spec->priority ==
- saved_spec->priority &&
- replace_equal)) {
- if (ins_index < 0)
- ins_index = i;
- else
- __set_bit(depth, mc_rem_map);
- }
- }
- }
-
- /* Once we reach the maximum search depth, use the first suitable
- * slot, or return -EBUSY if there was none
- */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out_unlock;
- }
-
- /* Create a software table entry if necessary. */
- saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
- if (saved_spec) {
- if (spec->priority == EFX_FILTER_PRI_AUTO &&
- saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
- /* Just make sure it won't be removed */
- if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
- saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- rc = ins_index;
- goto out_unlock;
- }
- replacing = true;
- priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
- } else {
- saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
- if (!saved_spec) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- *saved_spec = *spec;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Actually insert the filter on the HW */
- rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
- ctx, replacing);
-
- if (rc == -EINVAL && nic_data->must_realloc_vis)
- /* The MC rebooted under us, causing it to reject our filter
- * insertion as pointing to an invalid VI (spec->dmaq_id).
- */
- rc = -EAGAIN;
-
- /* Finalise the software table entry */
- if (rc == 0) {
- if (replacing) {
- /* Update the fields that may differ */
- if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
- saved_spec->flags |=
- EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->flags |= spec->flags;
- saved_spec->rss_context = spec->rss_context;
- saved_spec->dmaq_id = spec->dmaq_id;
- }
- } else if (!replacing) {
- kfree(saved_spec);
- saved_spec = NULL;
- } else {
- /* We failed to replace, so the old filter is still present.
- * Roll back the software table to reflect this. In fact the
- * efx_ef10_filter_set_entry() call below will do the right
- * thing, so nothing extra is needed here.
- */
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Remove and finalise entries for lower-priority multicast
- * recipients
- */
- if (is_mc_recip) {
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- unsigned int depth, i;
-
- memset(inbuf, 0, sizeof(inbuf));
-
- for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- if (!test_bit(depth, mc_rem_map))
- continue;
-
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
- priv_flags = efx_ef10_filter_entry_flags(table, i);
-
- if (rc == 0) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[i].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- }
-
- if (rc == 0) {
- kfree(saved_spec);
- saved_spec = NULL;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, i, saved_spec,
- priv_flags);
- }
- }
-
- /* If successful, return the inserted filter ID */
- if (rc == 0)
- rc = efx_ef10_make_filter_id(match_pri, ins_index);
-
-out_unlock:
- if (rss_locked)
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
- return rc;
-}
-
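
efx_ef10_filter_insert_locked() resolves hash collisions by probing linearly from the spec hash, bounded by EFX_EF10_FILTER_SEARCH_LIMIT (200) rather than walking all 8192 rows; for multicast recipients it keeps scanning and records lower-priority matches in mc_rem_map so they can be unsubscribed once the insert succeeds. A stripped-down sketch of the probe, with the removed constants and everything but the free-slot search elided:

#define TBL_ROWS	8192	/* HUNT_FILTER_TBL_ROWS */
#define SEARCH_LIMIT	200	/* EFX_EF10_FILTER_SEARCH_LIMIT */

/* Return a free row for `hash`, or -1 if the probe limit is hit
 * (which the real code reports as -EBUSY).
 */
static int probe_for_slot(void *const entries[TBL_ROWS], unsigned int hash)
{
	int ins_index = -1;
	unsigned int depth, i;

	for (depth = 1; depth < SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (TBL_ROWS - 1);
		if (!entries[i] && ins_index < 0)
			ins_index = i;	/* first free slot */
		/* the real code also compares stored specs here and
		 * tracks lower-priority multicast matches in mc_rem_map
		 */
	}
	return ins_index;
}
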
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- s32 ret;
-
- down_read(&efx->filter_sem);
- ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
- up_read(&efx->filter_sem);
-
- return ret;
-}
-
-static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
-{
- /* no need to do anything here on EF10 */
-}
-
-/* Remove a filter.
- * If !by_index, remove by ID
- * If by_index, remove by index
- * Filter ID may come from userland and must be range-checked.
- * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
- * for write.
- */
-static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- unsigned int priority_mask,
- u32 filter_id, bool by_index)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FILTER_OP_IN_HANDLE_OFST +
- MC_CMD_FILTER_OP_IN_HANDLE_LEN);
- struct efx_filter_spec *spec;
- DEFINE_WAIT(wait);
- int rc;
-
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec ||
- (!by_index &&
- efx_ef10_filter_pri(table, spec) !=
- efx_ef10_filter_get_unsafe_pri(filter_id)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
- priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
- /* Just remove flags */
- spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- return 0;
- }
-
- if (!(priority_mask & (1U << spec->priority)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
- /* Reset to an automatic filter */
-
- struct efx_filter_spec new_spec = *spec;
-
- new_spec.priority = EFX_FILTER_PRI_AUTO;
- new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
- EFX_FILTER_FLAG_RX_RSS : 0));
- new_spec.dmaq_id = 0;
- new_spec.rss_context = 0;
- rc = efx_ef10_filter_push(efx, &new_spec,
- &table->entry[filter_idx].handle,
- &efx->rss_context,
- true);
-
- if (rc == 0)
- *spec = new_spec;
- } else {
- /* Really remove the filter */
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf), NULL, 0, NULL);
-
- if ((rc == 0) || (rc == -ENOENT)) {
- /* Filter removed OK or didn't actually exist */
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- } else {
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
- MC_CMD_FILTER_OP_EXT_IN_LEN,
- NULL, 0, rc);
- }
- }
-
- return rc;
-}
-
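
efx_ef10_filter_remove_internal() has two distinct outcomes worth noting. A filter carrying EFX_FILTER_FLAG_RX_OVER_AUTO is not deleted but demoted back to an automatic filter: it is pushed again with EFX_FILTER_PRI_AUTO and the default RSS context. A true removal issues FILTER_OP REMOVE or UNSUBSCRIBE depending on the exclusivity test and treats -ENOENT from the firmware as success, since the filter is gone either way. Below, the _safe variant validates the priority encoded in the user-supplied ID, while the _unsafe variant trusts the caller and removes purely by index.
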
-static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- false);
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-/* Caller must hold efx->filter_sem for read */
-static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- if (filter_id == EFX_EF10_FILTER_ID_INVALID)
- return;
-
- down_write(&table->lock);
- efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- true);
- up_write(&table->lock);
-}
-
-static int efx_ef10_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- const struct efx_filter_spec *saved_spec;
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_pri(table, saved_spec) ==
- efx_ef10_filter_get_unsafe_pri(filter_id)) {
- *spec = *saved_spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int priority_mask;
- unsigned int i;
- int rc;
-
- priority_mask = (((1U << (priority + 1)) - 1) &
- ~(1U << EFX_FILTER_PRI_AUTO));
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- rc = efx_ef10_filter_remove_internal(efx, priority_mask,
- i, true);
- if (rc && rc != -ENOENT)
- break;
- rc = 0;
- }
-
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- if (table->entry[filter_idx].spec &&
- efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
- priority)
- ++count;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
-
-static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
-}
-
-static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (spec && spec->priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- break;
- }
- buf[count++] =
- efx_ef10_make_filter_id(
- efx_ef10_filter_pri(table, spec),
- filter_idx);
- }
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
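
Each public filter ID packs a match-priority band together with a table row; the encode/decode helpers sit outside this hunk, but a sketch consistent with the ID limit computed above (rx_match_count * HUNT_FILTER_TBL_ROWS * 2) would be:

#include <stdio.h>

#define TBL_ROWS 8192U	/* stand-in for HUNT_FILTER_TBL_ROWS */

static unsigned int make_filter_id(unsigned int match_pri, unsigned int idx)
{
	/* band width matches the * 2 factor in the limit above */
	return match_pri * TBL_ROWS * 2 + idx;
}

static unsigned int id_to_index(unsigned int id) { return id % (TBL_ROWS * 2); }
static unsigned int id_to_pri(unsigned int id) { return id / (TBL_ROWS * 2); }

int main(void)
{
	unsigned int id = make_filter_id(3, 42);

	printf("id=%u -> pri=%u idx=%u\n", id, id_to_pri(id), id_to_index(id));
	return 0;
}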
-
-#ifdef CONFIG_RFS_ACCEL
-
-static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int filter_idx)
-{
- struct efx_filter_spec *spec, saved_spec;
- struct efx_ef10_filter_table *table;
- struct efx_arfs_rule *rule = NULL;
- bool ret = true, force = false;
- u16 arfs_id;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
-
- if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
- goto out_unlock;
-
- spin_lock_bh(&efx->rps_hash_lock);
- if (!efx->rps_hash_table) {
- /* In the absence of the table, we always return 0 to ARFS. */
- arfs_id = 0;
- } else {
- rule = efx_rps_hash_find(efx, spec);
- if (!rule)
- /* ARFS table doesn't know of this filter, so remove it */
- goto expire;
- arfs_id = rule->arfs_id;
- ret = efx_rps_check_rule(rule, filter_idx, &force);
- if (force)
- goto expire;
- if (!ret) {
- spin_unlock_bh(&efx->rps_hash_lock);
- goto out_unlock;
- }
- }
- if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
- ret = false;
- else if (rule)
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
-expire:
- saved_spec = *spec; /* remove operation will kfree spec */
- spin_unlock_bh(&efx->rps_hash_lock);
- /* At this point (since we dropped the lock), another thread might queue
- * up a fresh insertion request (but the actual insertion will be held
- * up by our possession of the filter table lock). In that case, it
- * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
- * the rule is not removed by efx_rps_hash_del() below.
- */
- if (ret)
- ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
- filter_idx, true) == 0;
- /* While we can't safely dereference rule (we dropped the lock), we can
- * still test it for NULL.
- */
- if (ret && rule) {
- /* Expiring, so remove entry from ARFS table */
- spin_lock_bh(&efx->rps_hash_lock);
- efx_rps_hash_del(efx, &saved_spec);
- spin_unlock_bh(&efx->rps_hash_lock);
- }
-out_unlock:
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return ret;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
-static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
-{
- int match_flags = 0;
-
-#define MAP_FLAG(gen_flag, mcdi_field) do { \
- u32 old_mcdi_flags = mcdi_flags; \
- mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
- if (mcdi_flags != old_mcdi_flags) \
- match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
- } while (0)
-
- if (encap) {
- /* encap filters must specify encap type */
- match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
- /* and imply ethertype and ip proto */
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- /* VLAN tags refer to the outer packet */
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- /* everything else refers to the inner packet */
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, IFRM_SRC_IP);
- MAP_FLAG(LOC_HOST, IFRM_DST_IP);
- MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
- MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
- MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
- MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
- MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
- MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
- } else {
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, SRC_IP);
- MAP_FLAG(LOC_HOST, DST_IP);
- MAP_FLAG(REM_MAC, SRC_MAC);
- MAP_FLAG(REM_PORT, SRC_PORT);
- MAP_FLAG(LOC_MAC, DST_MAC);
- MAP_FLAG(LOC_PORT, DST_PORT);
- MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FLAG(IP_PROTO, IP_PROTO);
- }
-#undef MAP_FLAG
-
- /* Did we map them all? */
- if (mcdi_flags)
- return -EINVAL;
-
- return match_flags;
-}
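
MAP_FLAG translates by consuming bits: each recognised MCDI bit is cleared and its generic flag recorded, so any bit still set afterwards marks an unsupported combination and the function returns -EINVAL. The same consume-and-check pattern in a self-contained sketch (bit positions invented, not the MCDI ones):

#include <stdio.h>

#define HW_SRC_IP	(1U << 0)
#define HW_DST_IP	(1U << 1)
#define GEN_REM_HOST	(1U << 0)
#define GEN_LOC_HOST	(1U << 1)

static int map_match_flags(unsigned int hw_flags, unsigned int *gen_flags)
{
	*gen_flags = 0;
#define MAP(hw, gen) do {				\
		if (hw_flags & (hw)) {			\
			hw_flags &= ~(hw);		\
			*gen_flags |= (gen);		\
		}					\
	} while (0)
	MAP(HW_SRC_IP, GEN_REM_HOST);
	MAP(HW_DST_IP, GEN_LOC_HOST);
#undef MAP
	return hw_flags ? -1 : 0;	/* leftover bits => unsupported */
}

int main(void)
{
	unsigned int gen;

	/* bit 7 is never consumed, so the combination is rejected */
	printf("rc=%d\n", map_match_flags(HW_SRC_IP | (1U << 7), &gen));
	return 0;
}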
-
-static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan, *next_vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
-static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
- bool encap,
- enum efx_filter_match_flags match_flags)
-{
- unsigned int match_pri;
- int mf;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++) {
- mf = efx_ef10_filter_match_flags_from_mcdi(encap,
- table->rx_match_mcdi_flags[match_pri]);
- if (mf == match_flags)
- return true;
- }
-
- return false;
-}
-
-static int
-efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
- struct efx_ef10_filter_table *table,
- bool encap)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
- unsigned int pd_match_pri, pd_match_count;
- size_t outlen;
- int rc;
-
- /* Find out which RX filter types are supported, and their priorities */
- MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
- encap ?
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
- inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
- &outlen);
- if (rc)
- return rc;
-
- pd_match_count = MCDI_VAR_ARRAY_LEN(
- outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
-
- for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
- u32 mcdi_flags =
- MCDI_ARRAY_DWORD(
- outbuf,
- GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
- pd_match_pri);
- rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
- if (rc < 0) {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u not supported in driver\n",
- __func__, mcdi_flags, pd_match_pri);
- } else {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
- __func__, mcdi_flags, pd_match_pri,
- rc, table->rx_match_count);
- table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
- table->rx_match_count++;
- }
- }
-
- return 0;
-}
-
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_table *table;
- struct efx_ef10_vlan *vlan;
- int rc;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- if (efx->filter_state) /* already probed */
- return 0;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- table->rx_match_count = 0;
- rc = efx_ef10_filter_table_probe_matches(efx, table, false);
- if (rc)
- goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- rc = efx_ef10_filter_table_probe_matches(efx, table, true);
- if (rc)
- goto fail;
- if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
- efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
- netif_info(efx, probe, net_dev,
- "VLAN filters are not supported in this firmware variant\n");
- net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
- sizeof(*table->entry)));
- if (!table->entry) {
- rc = -ENOMEM;
- goto fail;
- }
-
- table->mc_promisc_last = false;
- table->vlan_filter =
- !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- INIT_LIST_HEAD(&table->vlan_list);
- init_rwsem(&table->lock);
-
- efx->filter_state = table;
-
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
- return 0;
-
-fail_add_vlan:
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
-fail:
- kfree(table);
- return rc;
-}
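
The probe uses the usual kernel unwind idiom: each failure jumps to a label that releases only what was already set up, with later labels falling through to earlier ones. A userspace sketch of the shape, using a toy structure rather than the driver's:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int *rows; };

static int table_probe(struct table **out)
{
	struct table *t = calloc(1, sizeof(*t));
	int rc;

	if (!t)
		return -ENOMEM;
	t->rows = calloc(16, sizeof(*t->rows));
	if (!t->rows) {
		rc = -ENOMEM;
		goto fail;
	}
	if (0) {		/* stand-in for a later step failing */
		rc = -EIO;
		goto fail_rows;
	}
	*out = t;
	return 0;

fail_rows:
	free(t->rows);
fail:
	free(t);
	return rc;
}

int main(void)
{
	struct table *t = NULL;
	int rc = table_probe(&t);

	printf("rc=%d\n", rc);
	if (!rc) {
		free(t->rows);
		free(t);
	}
	return 0;
}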
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_table_restore(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- unsigned int invalid_filters = 0, failed = 0;
- struct efx_ef10_filter_vlan *vlan;
- struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
- unsigned int filter_idx;
- u32 mcdi_flags;
- int match_pri;
- int rc, i;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
- return;
-
- down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- match_pri = 0;
- while (match_pri < table->rx_match_count &&
- table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
- ++match_pri;
- if (match_pri >= table->rx_match_count) {
- invalid_filters++;
- goto not_restored;
- }
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- if (!ctx) {
- netif_warn(efx, drv, efx->net_dev,
- "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- netif_warn(efx, drv, efx->net_dev,
- "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- }
-
- rc = efx_ef10_filter_push(efx, spec,
- &table->entry[filter_idx].handle,
- ctx, false);
- if (rc)
- failed++;
-
- if (rc) {
-not_restored:
- list_for_each_entry(vlan, &table->vlan_list, list)
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
- if (vlan->default_filters[i] == filter_idx)
- vlan->default_filters[i] =
- EFX_EF10_FILTER_ID_INVALID;
-
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- }
- }
-
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
-
- /* This can happen validly if the MC's capabilities have changed, so
- * is not an error.
- */
- if (invalid_filters)
- netif_dbg(efx, drv, efx->net_dev,
- "Did not restore %u filters that are now unsupported.\n",
- invalid_filters);
-
- if (failed)
- netif_err(efx, hw, efx->net_dev,
- "unable to restore %u filters\n", failed);
- else
- nic_data->must_restore_filters = false;
-}
-
-static void efx_ef10_filter_table_remove(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- int rc;
-
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
- /* If we were called without locking, then it's not safe to free
- * the table as others might be using it. So we just WARN, leak
- * the memory, and potentially get an inconsistent filter table
- * state.
- * This should never actually happen.
- */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
- if (rc)
- netif_info(efx, drv, efx->net_dev,
- "%s: filter %04x remove failed\n",
- __func__, filter_idx);
- kfree(spec);
- }
-
- vfree(table->entry);
- kfree(table);
-}
-
-static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx;
-
- efx_rwsem_assert_write_locked(&table->lock);
-
- if (*id != EFX_EF10_FILTER_ID_INVALID) {
- filter_idx = efx_ef10_filter_get_unsafe_id(*id);
- if (!table->entry[filter_idx].spec)
- netif_dbg(efx, drv, efx->net_dev,
- "marked null spec old %04x:%04x\n", *id,
- filter_idx);
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- *id = EFX_EF10_FILTER_ID_INVALID;
- }
-}
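
The spec field here behaves as a tagged word: because the saved spec allocation is word-aligned, its low bits are free to carry markers such as AUTO_OLD, and the real pointer is recovered by masking them off. A sketch of the technique with an invented flag value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_AUTO_OLD	0x1UL	/* invented, not the driver's value */
#define FLAG_MASK	0x3UL

struct spec { int priority; };

static struct spec *entry_spec(uintptr_t word)
{
	return (struct spec *)(word & ~FLAG_MASK);	/* strip tag bits */
}

int main(void)
{
	struct spec *s = malloc(sizeof(*s));
	uintptr_t word;

	if (!s)
		return 1;
	word = (uintptr_t)s;		/* store the pointer... */
	word |= FLAG_AUTO_OLD;		/* ...then mark it old in place */
	printf("old=%d ptr_ok=%d\n",
	       (int)(word & FLAG_AUTO_OLD), entry_spec(word) == s);
	free(s);
	return 0;
}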
-
-/* Mark old per-VLAN filters that may need to be removed */
-static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int i;
-
- for (i = 0; i < table->dev_uc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
- for (i = 0; i < table->dev_mc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
-}
-
-/* Mark old filters that may need to be removed.
- * Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- down_write(&table->lock);
- list_for_each_entry(vlan, &table->vlan_list, list)
- _efx_ef10_filter_vlan_mark_old(efx, vlan);
- up_write(&table->lock);
-}
-
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *uc;
- unsigned int i;
-
- table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->uc_promisc = true;
- break;
- }
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
- }
-
- table->dev_uc_count = i;
-}
-
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *mc;
- unsigned int i;
-
- table->mc_overflow = false;
- table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
-
- i = 0;
- netdev_for_each_mc_addr(mc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->mc_promisc = true;
- table->mc_overflow = true;
- break;
- }
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
- }
-
- table->dev_mc_count = i;
-}
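
Both address-list helpers apply one policy: copy entries until the fixed table fills, then stop and fall back to a promiscuous catch-all. The policy in isolation, with invented limits:

#include <stdio.h>
#include <string.h>

#define ADDR_MAX 4
#define ETH_LEN  6

static unsigned int copy_addrs(unsigned char (*in)[ETH_LEN], unsigned int n,
			       unsigned char (*out)[ETH_LEN], int *promisc)
{
	unsigned int i;

	*promisc = 0;
	for (i = 0; i < n; i++) {
		if (i >= ADDR_MAX) {
			*promisc = 1;	/* table full: use a catch-all */
			break;
		}
		memcpy(out[i], in[i], ETH_LEN);
	}
	return i;			/* number actually copied */
}

int main(void)
{
	unsigned char in[6][ETH_LEN] = { { 0x02 } };
	unsigned char out[ADDR_MAX][ETH_LEN];
	int promisc;
	unsigned int n = copy_addrs(in, 6, out, &promisc);

	printf("copied=%u promisc=%d\n", n, promisc);
	return 0;
}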
-
-static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- bool multicast, bool rollback)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_dev_addr *addr_list;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- unsigned int i, j;
- int addr_count;
- u16 *ids;
- int rc;
-
- if (multicast) {
- addr_list = table->dev_mc_list;
- addr_count = table->dev_mc_count;
- ids = vlan->mc;
- } else {
- addr_list = table->dev_uc_list;
- addr_count = table->dev_uc_count;
- ids = vlan->uc;
- }
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- /* Insert/renew filters */
- for (i = 0; i < addr_count; i++) {
- EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- if (rollback) {
- netif_info(efx, drv, efx->net_dev,
- "efx_ef10_filter_insert failed rc=%d\n",
- rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- /* keep invalid ID, and carry on */
- }
- } else {
- ids[i] = efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- if (multicast && rollback) {
- /* Also need an Ethernet broadcast filter */
- EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n", rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- return 0;
-}
-
-static int efx_ef10_filter_insert_def(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- enum efx_encap_type encap_type,
- bool multicast, bool rollback)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- int rc;
- u16 *id;
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
-
- if (multicast)
- efx_filter_set_mc_def(&spec);
- else
- efx_filter_set_uc_def(&spec);
-
- if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- efx_filter_set_encap_type(&spec, encap_type);
- else
- /* don't insert encap filters on non-supporting
- * platforms. ID will be left as INVALID.
- */
- return 0;
- }
-
- if (vlan->vid != EFX_FILTER_VID_UNSPEC)
- efx_filter_set_eth_local(&spec, vlan->vid, NULL);
-
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- const char *um = multicast ? "Multicast" : "Unicast";
- const char *encap_name = "";
- const char *encap_ipv = "";
-
- if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_VXLAN)
- encap_name = "VXLAN ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_NVGRE)
- encap_name = "NVGRE ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_GENEVE)
- encap_name = "GENEVE ";
- if (encap_type & EFX_ENCAP_FLAG_IPV6)
- encap_ipv = "IPv6 ";
- else if (encap_type)
- encap_ipv = "IPv4 ";
-
- /* unprivileged functions can't insert mismatch filters
- * for encapsulated or unicast traffic, so downgrade
- * those warnings to debug.
- */
- netif_cond_dbg(efx, drv, efx->net_dev,
- rc == -EPERM && (encap_type || !multicast), warn,
- "%s%s%s mismatch filter insert failed rc=%d\n",
- encap_name, encap_ipv, um, rc);
- } else if (multicast) {
- /* mapping from encap types to default filter IDs (multicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_MCDEF,
- };
-
- /* quick bounds check (BCAST result impossible) */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
-
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = efx_ef10_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
- /* Also need an Ethernet broadcast filter */
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n",
- rc);
- if (rollback) {
- /* Roll back the mc_def filter */
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- *id);
- *id = EFX_EF10_FILTER_ID_INVALID;
- return rc;
- }
- } else {
- EFX_WARN_ON_PARANOID(
- vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
- rc = 0;
- } else {
- /* mapping from encap types to default filter IDs (unicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_UCDEF,
- };
-
- /* quick bounds check (BCAST result impossible) */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = rc;
- rc = 0;
- }
- return rc;
-}
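
Both maps above lean on two C guarantees: designated initializers leave unnamed slots zero, and slot 0 is reserved for EFX_EF10_BCAST (hence the BUILD_BUG_ON), so map[encap_type] == 0 doubles as a hole check once the bounds test has passed. The pattern in isolation, with stand-in values:

#include <stdio.h>

enum def_filter { DEF_BCAST = 0, DEF_UCDEF, DEF_VXLAN4, DEF_NVGRE4 };

#define ENCAP_NONE	0
#define ENCAP_VXLAN	1
#define ENCAP_NVGRE	2

static const enum def_filter map[] = {
	[ENCAP_NONE]	= DEF_UCDEF,
	[ENCAP_VXLAN]	= DEF_VXLAN4,
	[ENCAP_NVGRE]	= DEF_NVGRE4,
};

static int lookup(unsigned int encap)
{
	/* slot 0 is reserved, so a zero entry means "no mapping" */
	if (encap >= sizeof(map) / sizeof(map[0]) || map[encap] == 0)
		return -1;
	return map[encap];
}

int main(void)
{
	printf("vxlan=%d bogus=%d\n", lookup(ENCAP_VXLAN), lookup(7));
	return 0;
}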
-
-/* Remove filters that weren't renewed. */
-static void efx_ef10_filter_remove_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- int remove_failed = 0;
- int remove_noent = 0;
- int rc;
- int i;
-
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (READ_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_AUTO_OLD) {
- rc = efx_ef10_filter_remove_internal(efx,
- 1U << EFX_FILTER_PRI_AUTO, i, true);
- if (rc == -ENOENT)
- remove_noent++;
- else if (rc)
- remove_failed++;
- }
- }
- up_write(&table->lock);
-
- if (remove_failed)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d filters\n",
- __func__, remove_failed);
- if (remove_noent)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d non-existent filters\n",
- __func__, remove_noent);
-}
-
static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -5545,7 +3153,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
@@ -5577,7 +3185,7 @@ restore_vadaptor:
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_ef10_filter_table_probe(efx);
+ rc2 = efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -5598,256 +3206,6 @@ reset_nic:
return rc ? rc : rc2;
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- /* Do not install unspecified VID if VLAN filtering is enabled.
- * Do not install all specified VIDs if VLAN filtering is disabled.
- */
- if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
- return;
-
- /* Insert/renew unicast filters */
- if (table->uc_promisc) {
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
- false, false);
- efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
- } else {
- /* If any of the filters failed to insert, fall back to
- * promiscuous mode - add in the uc_def filter. But keep
- * our individual unicast filters.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- false, false);
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
-
- /* Insert/renew multicast filters */
- /* If changing promiscuous state with cascaded multicast filters, remove
- * old filters first, so that packets are dropped rather than duplicated
- */
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
- efx_ef10_filter_remove_old(efx);
- if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
- /* If we failed to insert promiscuous filters, roll back
- * and fall back to individual multicast filters
- */
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true)) {
- /* Changing promisc state, so remove old filters */
- efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If we failed to insert promiscuous filters, don't
- * roll back. Regardless, also insert the mc_list,
- * unless it's incomplete due to overflow
- */
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, false);
- if (!table->mc_overflow)
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If any filters failed to insert, roll back and fall back to
- * promiscuous mode - mc_def filter and maybe broadcast. If
- * that fails, roll back again and insert as many of our
- * individual multicast filters as we can.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
- /* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
- efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true))
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
-}
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_vlan *vlan;
- bool vlan_filter;
-
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
-
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx);
- efx_ef10_filter_mc_addr_list(efx);
- netif_addr_unlock_bh(net_dev);
-
- /* If VLAN filtering changes, all old filters are finally removed.
- * Do it in advance to avoid conflicts for unicast untagged and
- * VLAN 0 tagged filters.
- */
- vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- if (table->vlan_filter != vlan_filter) {
- table->vlan_filter = vlan_filter;
- efx_ef10_filter_remove_old(efx);
- }
-
- list_for_each_entry(vlan, &table->vlan_list, list)
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- efx_ef10_filter_remove_old(efx);
- table->mc_promisc_last = table->mc_promisc;
-}
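
efx_ef10_filter_sync_rx_mode is effectively mark-and-sweep: mark every auto filter old, re-insert the currently wanted set (renewal clears the mark), then remove whatever is still marked. The skeleton of that sequence over a toy int table:

#include <stdio.h>

#define OLD 0x1
#define N   4

static void sync_table(int *table, const int *wanted)
{
	int i;

	for (i = 0; i < N; i++)		/* mark everything old */
		if (table[i])
			table[i] |= OLD;
	for (i = 0; i < N; i++)		/* renew: mark is cleared */
		if (wanted[i])
			table[i] = 2;	/* "installed", OLD bit clear */
	for (i = 0; i < N; i++)		/* sweep stale entries */
		if (table[i] & OLD)
			table[i] = 0;
}

int main(void)
{
	int table[N] = { 2, 2, 0, 2 }, wanted[N] = { 1, 0, 1, 0 };

	sync_table(table, wanted);
	printf("%d %d %d %d\n", table[0], table[1], table[2], table[3]);
	return 0;
}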
-
-static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- list_for_each_entry(vlan, &table->vlan_list, list) {
- if (vlan->vid == vid)
- return vlan;
- }
-
- return NULL;
-}
-
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
- unsigned int i;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (WARN_ON(vlan)) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u already added\n", vid);
- return -EALREADY;
- }
-
- vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return -ENOMEM;
-
- vlan->vid = vid;
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
-
- list_add_tail(&vlan->list, &table->vlan_list);
-
- if (efx_dev_registered(efx))
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- return 0;
-}
-
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- unsigned int i;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- list_del(&vlan->list);
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->uc[i]);
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->default_filters[i]);
-
- kfree(vlan);
-}
-
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_vlan *vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (!vlan) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u not found in filter state\n", vid);
- return;
- }
-
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -5860,7 +3218,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -5869,7 +3227,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_ef10_filter_table_probe(efx);
+ efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -5931,14 +3289,14 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return efx_mcdi_set_mac(efx);
}
static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return 0;
}
@@ -6650,36 +4008,36 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_port_dummy_op_int,
@@ -6709,7 +4067,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
@@ -6759,39 +4117,39 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_ef10_mtd_probe,
@@ -6844,7 +4202,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 52bd43f45761..14393767ef9f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -522,10 +522,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
if (!is_zero_ether_addr(mac)) {
rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
- if (rc) {
- eth_zero_addr(vf->mac);
+ if (rc)
goto fail;
- }
+
if (vf->efx)
ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 7a38d7f282a1..4481f21a1f43 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -23,6 +23,10 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
@@ -39,56 +43,6 @@
**************************************************************************
*/
-/* Loopback mode names (see LOOPBACK_MODE()) */
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *const efx_loopback_mode_names[] = {
- [LOOPBACK_NONE] = "NONE",
- [LOOPBACK_DATA] = "DATAPATH",
- [LOOPBACK_GMAC] = "GMAC",
- [LOOPBACK_XGMII] = "XGMII",
- [LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
- [LOOPBACK_XGBR] = "XGBR",
- [LOOPBACK_XFI] = "XFI",
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
- [LOOPBACK_GPHY] = "GPHY",
- [LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
- [LOOPBACK_XPORT] = "XPORT",
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
- [LOOPBACK_XFI_WS] = "XFI_WS",
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
-};
-
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
- [RESET_TYPE_DATAPATH] = "DATAPATH",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
-};
-
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
@@ -104,18 +58,6 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
snprintf(buf, buflen, "type %d", type);
}
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
- * queued onto this work queue. This is not a per-nic work queue, because
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
- */
-static struct workqueue_struct *reset_workqueue;
-
-/* How often and how many times to poll for a reset while waiting for a
- * BIST that another function started to complete.
- */
-#define BIST_WAIT_DELAY_MS 100
-#define BIST_WAIT_DELAY_COUNT 100
-
/**************************************************************************
*
* Configurable values
@@ -135,21 +77,6 @@ module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
-/* This is the time (in jiffies) between invocations of the hardware
- * monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
- */
-static unsigned int efx_monitor_interval = 1 * HZ;
-
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
@@ -169,38 +96,10 @@ static unsigned int rx_irq_mod_usec = 60;
*/
static unsigned int tx_irq_mod_usec = 150;
-/* This is the first interrupt mode to try out of:
- * 0 => MSI-X
- * 1 => MSI
- * 2 => legacy
- */
-static unsigned int interrupt_mode;
-
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
- * i.e. the number of CPUs among which we may distribute simultaneous
- * interrupt handling.
- *
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each core.
- */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 8000;
-module_param(irq_adapt_low_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_low_thresh,
- "Threshold score for reducing IRQ moderation");
-
-static unsigned irq_adapt_high_thresh = 16000;
-module_param(irq_adapt_high_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_high_thresh,
- "Threshold score for increasing IRQ moderation");
-
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -214,18 +113,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static int efx_soft_enable_interrupts(struct efx_nic *efx);
-static void efx_soft_disable_interrupts(struct efx_nic *efx);
-static void efx_remove_channel(struct efx_channel *channel);
-static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi_channel(struct efx_channel *channel);
-static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_napi_channel(struct efx_channel *channel);
-static void efx_fini_struct(struct efx_nic *efx);
-static void efx_start_all(struct efx_nic *efx);
-static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
@@ -239,776 +128,12 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
ASSERT_RTNL(); \
} while (0)
-static int efx_check_disabled(struct efx_nic *efx)
-{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
- netif_err(efx, drv, efx->net_dev,
- "device is disabled due to earlier errors\n");
- return -EIO;
- }
- return 0;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- *
- *************************************************************************/
-
-/* Process channel's event queue
- *
- * This function is responsible for processing the event queue of a
- * single channel. The caller must guarantee that this function will
- * never be concurrently called more than once on the same channel,
- * though different channels may be being processed concurrently.
- */
-static int efx_process_channel(struct efx_channel *channel, int budget)
-{
- struct efx_tx_queue *tx_queue;
- struct list_head rx_list;
- int spent;
-
- if (unlikely(!channel->enabled))
- return 0;
-
- /* Prepare the batch receive list */
- EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
- INIT_LIST_HEAD(&rx_list);
- channel->rx_list = &rx_list;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->pkts_compl = 0;
- tx_queue->bytes_compl = 0;
- }
-
- spent = efx_nic_process_eventq(channel, budget);
- if (spent && efx_channel_has_rx_queue(channel)) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
- efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue, true);
- }
-
- /* Update BQL */
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->bytes_compl) {
- netdev_tx_completed_queue(tx_queue->core_txq,
- tx_queue->pkts_compl, tx_queue->bytes_compl);
- }
- }
-
- /* Receive any packets we queued up */
- netif_receive_skb_list(channel->rx_list);
- channel->rx_list = NULL;
-
- return spent;
-}
-
-/* NAPI poll handler
- *
- * NAPI guarantees serialisation of polls of the same device, which
- * provides the guarantee required by efx_process_channel().
- */
-static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
-{
- int step = efx->irq_mod_step_us;
-
- if (channel->irq_mod_score < irq_adapt_low_thresh) {
- if (channel->irq_moderation_us > step) {
- channel->irq_moderation_us -= step;
- efx->type->push_irq_moderation(channel);
- }
- } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
- if (channel->irq_moderation_us <
- efx->irq_rx_moderation_us) {
- channel->irq_moderation_us += step;
- efx->type->push_irq_moderation(channel);
- }
- }
-
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
-}
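
The adaptive step nudges the per-channel moderation interval down when the event score is low and up when it is high, clamped between one step and the configured maximum; 8000 and 16000 match the irq_adapt_low_thresh/irq_adapt_high_thresh module parameters removed earlier in this patch. A standalone sketch:

#include <stdio.h>

#define LOW_THRESH	8000
#define HIGH_THRESH	16000

static unsigned int update_mod(unsigned int mod_us, unsigned int step,
			       unsigned int max_us, unsigned int score)
{
	if (score < LOW_THRESH && mod_us > step)
		mod_us -= step;		/* quiet: react faster */
	else if (score > HIGH_THRESH && mod_us < max_us)
		mod_us += step;		/* busy: batch more events */
	return mod_us;
}

int main(void)
{
	printf("%u\n", update_mod(60, 10, 120, 20000));	/* busy: 60 -> 70 */
	return 0;
}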
-
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int spent;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
-
- spent = efx_process_channel(channel, budget);
-
- xdp_do_flush_map();
-
- if (spent < budget) {
- if (efx_channel_has_rx_queue(channel) &&
- efx->irq_rx_adaptive &&
- unlikely(++channel->irq_count == 1000)) {
- efx_update_irq_mod(efx, channel);
- }
-
-#ifdef CONFIG_RFS_ACCEL
- /* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
-#endif
-
- /* There is no race here; although napi_disable() will
- * only wait for napi_complete(), this isn't a problem
- * since efx_nic_eventq_read_ack() will have no effect if
- * interrupts have already been disabled.
- */
- if (napi_complete_done(napi, spent))
- efx_nic_eventq_read_ack(channel);
- }
-
- return spent;
-}
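
efx_poll follows the NAPI contract: consume at most budget events, and only when fewer than budget were spent may the poll complete and the interrupt be re-armed. The control flow in miniature, over a toy queue rather than the real event ring:

#include <stdio.h>

static int process(const int *queue, int budget)
{
	int spent = 0;

	while (spent < budget && queue[spent])
		spent++;
	return spent;
}

int main(void)
{
	int queue[8] = { 1, 1, 1, 0 };
	int budget = 8, spent = process(queue, budget);

	if (spent < budget)
		printf("complete, re-arm IRQ (spent=%d)\n", spent);
	else
		printf("stay in polling mode (spent=%d)\n", spent);
	return 0;
}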
-
-/* Create event queue
- * Event queue memory allocations are done only once. If the channel
- * is reset, the memory buffer will be reused; this guards against
- * errors during channel reset and also simplifies interrupt handling.
- */
-static int efx_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned long entries;
-
- netif_dbg(efx, probe, efx->net_dev,
- "chan %d create event queue\n", channel->channel);
-
- /* Build an event queue with room for one event per tx and rx buffer,
- * plus some extra for link state events and MCDI completions. */
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
-
- return efx_nic_probe_eventq(channel);
-}
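
Rounding the entry count up to a power of two lets the queue wrap its read pointer with a single AND against eventq_mask. A sketch of the sizing and the wrap, with a stand-in for EFX_MIN_EVQ_SIZE:

#include <stdio.h>

#define MIN_EVQ 512UL

static unsigned long roundup_pow2(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	/* rxq + txq + headroom, as in efx_probe_eventq() above */
	unsigned long entries = roundup_pow2(1024 + 256 + 128);
	unsigned long mask;

	if (entries < MIN_EVQ)
		entries = MIN_EVQ;
	mask = entries - 1;
	/* advancing past the last slot wraps to 0 with a single AND */
	printf("entries=%lu mask=%#lx wrap=%lu\n",
	       entries, mask, ((entries - 1) + 1) & mask);
	return 0;
}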
-
-/* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- int rc;
-
- EFX_WARN_ON_PARANOID(channel->eventq_init);
-
- netif_dbg(efx, drv, efx->net_dev,
- "chan %d init event queue\n", channel->channel);
-
- rc = efx_nic_init_eventq(channel);
- if (rc == 0) {
- efx->type->push_irq_moderation(channel);
- channel->eventq_read_ptr = 0;
- channel->eventq_init = true;
- }
- return rc;
-}
-
-/* Enable event queue processing and NAPI */
-void efx_start_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "chan %d start event queue\n", channel->channel);
-
- /* Make sure the NAPI handler sees the enabled flag set */
- channel->enabled = true;
- smp_wmb();
-
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
-}
-
-/* Disable event queue processing and NAPI */
-void efx_stop_eventq(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- napi_disable(&channel->napi_str);
- channel->enabled = false;
-}
-
-static void efx_fini_eventq(struct efx_channel *channel)
-{
- if (!channel->eventq_init)
- return;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d fini event queue\n", channel->channel);
-
- efx_nic_fini_eventq(channel);
- channel->eventq_init = false;
-}
-
-static void efx_remove_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d remove event queue\n", channel->channel);
-
- efx_nic_remove_eventq(channel);
-}
-
-/**************************************************************************
- *
- * Channel handling
- *
- *************************************************************************/
-
-/* Allocate and initialise a channel structure. */
-static struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- channel->efx = efx;
- channel->channel = i;
- channel->type = &efx_default_channel_type;
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
- tx_queue->channel = channel;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-
- return channel;
-}
-
-/* Allocate and initialise a channel structure, copying parameters
- * (but not resources) from an old channel structure.
- */
-static struct efx_channel *
-efx_copy_channel(const struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- *channel = *old_channel;
-
- channel->napi_dev = NULL;
- INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
- channel->napi_str.napi_id = 0;
- channel->napi_str.state = 0;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- return channel;
-}
-
-static int efx_probe_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
- "creating channel %d\n", channel->channel);
-
- rc = channel->type->pre_probe(channel);
- if (rc)
- goto fail;
-
- rc = efx_probe_eventq(channel);
- if (rc)
- goto fail;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
- if (rc)
- goto fail;
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
- if (rc)
- goto fail;
- }
-
- channel->rx_list = NULL;
-
- return 0;
-
-fail:
- efx_remove_channel(channel);
- return rc;
-}
-
-static void
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
-{
- struct efx_nic *efx = channel->efx;
- const char *type;
- int number;
-
- number = channel->channel;
-
- if (number >= efx->xdp_channel_offset &&
- !WARN_ON_ONCE(!efx->n_xdp_channels)) {
- type = "-xdp";
- number -= efx->xdp_channel_offset;
- } else if (efx->tx_channel_offset == 0) {
- type = "";
- } else if (number < efx->tx_channel_offset) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->tx_channel_offset;
- }
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
-}
-
-static void efx_set_channel_names(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- channel->type->get_name(channel,
- efx->msi_context[channel->channel].name,
- sizeof(efx->msi_context[0].name));
-}
-
-static int efx_probe_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- int rc;
-
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
- /* Probe channels in reverse, so that any 'extra' channels
- * use the start of the buffer table. This allows the traffic
- * channels to be resized without moving them or wasting the
- * entries before them.
- */
- efx_for_each_channel_rev(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail;
- }
- }
- efx_set_channel_names(efx);
-
- return 0;
-
-fail:
- efx_remove_channels(efx);
- return rc;
-}
-
-/* Channels are shut down and reinitialised whilst the NIC is running
- * to propagate configuration changes (mtu, checksum offload), or
- * to clear hardware error conditions
- */
-static void efx_start_datapath(struct efx_nic *efx)
-{
- netdev_features_t old_features = efx->net_dev->features;
- bool old_rx_scatter = efx->rx_scatter;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct efx_channel *channel;
- size_t rx_buf_len;
-
- /* Calculate the rx buffer allocation parameters required to
- * support the current MTU, including padding for header
- * alignment and overruns.
- */
- efx->rx_dma_len = (efx->rx_prefix_size +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
- if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = efx->type->always_rx_scatter;
- efx->rx_buffer_order = 0;
- } else if (efx->type->can_rx_scatter) {
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
- EFX_RX_BUF_ALIGNMENT) >
- PAGE_SIZE);
- efx->rx_scatter = true;
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
- efx->rx_buffer_order = 0;
- } else {
- efx->rx_scatter = false;
- efx->rx_buffer_order = get_order(rx_buf_len);
- }
-
- efx_rx_config_page_split(efx);
- if (efx->rx_buffer_order)
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u; page order=%u batch=%u\n",
- efx->rx_dma_len, efx->rx_buffer_order,
- efx->rx_pages_per_batch);
- else
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
- efx->rx_dma_len, efx->rx_page_buf_step,
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
-
- /* Restore previously fixed features in hw_features and remove
- * features which are fixed now
- */
- efx->net_dev->hw_features |= efx->net_dev->features;
- efx->net_dev->hw_features &= ~efx->fixed_features;
- efx->net_dev->features |= efx->fixed_features;
- if (efx->net_dev->features != old_features)
- netdev_features_change(efx->net_dev);
-
- /* RX filters may also have scatter-enabled flags */
- if (efx->rx_scatter != old_rx_scatter)
- efx->type->filter_update_rx_scatter(efx);
-
- /* We must keep at least one descriptor in a TX ring empty.
- * We could avoid this when the queue size does not exactly
- * match the hardware ring size, but it's not that important.
- * Therefore we stop the queue when one more skb might fill
- * the ring completely. We wake it when half way back to
- * empty.
- */
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
-
- /* Initialise the channels */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_init_tx_queue(tx_queue);
- atomic_inc(&efx->active_queues);
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
- atomic_inc(&efx->active_queues);
- efx_stop_eventq(channel);
- efx_fast_push_rx_descriptors(rx_queue, false);
- efx_start_eventq(channel);
- }
-
- WARN_ON(channel->rx_pkt_n_frags);
- }
-
- efx_ptp_start_datapath(efx);
-
- if (netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-}
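
The stop/wake thresholds computed above are easier to see with concrete numbers. A minimal sketch, assuming a hypothetical 1024-entry ring and a worst-case skb of 18 descriptors (both values invented; the real figure comes from efx_tx_max_skb_descs()):

#include <stdio.h>

int main(void)
{
	unsigned int txq_entries = 1024;	/* invented ring size */
	unsigned int max_skb_descs = 18;	/* invented worst case */
	unsigned int stop = txq_entries - max_skb_descs;	/* 1006 */
	unsigned int wake = stop / 2;				/* 503 */

	printf("stop at %u used descriptors, wake at %u\n", stop, wake);
	return 0;
}
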
-
-static void efx_stop_datapath(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->port_enabled);
-
- efx_ptp_stop_datapath(efx);
-
- /* Stop RX refill */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- rx_queue->refill_enabled = false;
- }
-
- efx_for_each_channel(channel, efx) {
- /* RX packet processing is pipelined, so wait for the
- * NAPI handler to complete. At least event queue 0
- * might be kept active by non-data events, so don't
- * use napi_synchronize() but actually disable NAPI
- * temporarily.
- */
- if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
- }
- }
-
- rc = efx->type->fini_dmaq(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
- }
- efx->xdp_rxq_info_failed = false;
-}
-
-static void efx_remove_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "destroy chan %d\n", channel->channel);
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- efx_remove_eventq(channel);
- channel->type->post_remove(channel);
-}
-
-static void efx_remove_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
-
- kfree(efx->xdp_tx_queues);
-}
-
-int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
-{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
- u32 old_rxq_entries, old_txq_entries;
- unsigned i, next_buffer_table = 0;
- int rc, rc2;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_soft_disable_interrupts(efx);
-
- /* Clone channels (where possible) */
- memset(other_channel, 0, sizeof(other_channel));
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (channel->type->copy)
- channel = channel->type->copy(channel);
- if (!channel) {
- rc = -ENOMEM;
- goto out;
- }
- other_channel[i] = channel;
- }
-
- /* Swap entry counts and channel pointers */
- old_rxq_entries = efx->rxq_entries;
- old_txq_entries = efx->txq_entries;
- efx->rxq_entries = rxq_entries;
- efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
-
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (!channel->type->copy)
- continue;
- rc = efx_probe_channel(channel);
- if (rc)
- goto rollback;
- efx_init_napi_channel(efx->channel[i]);
- }
-
-out:
- /* Destroy unused channel structures */
- for (i = 0; i < efx->n_channels; i++) {
- channel = other_channel[i];
- if (channel && channel->type->copy) {
- efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
- kfree(channel);
- }
- }
-
- rc2 = efx_soft_enable_interrupts(efx);
- if (rc2) {
- rc = rc ? rc : rc2;
- netif_err(efx, drv, efx->net_dev,
- "unable to restart interrupts on channel reallocation\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- } else {
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-
-rollback:
- /* Swap back */
- efx->rxq_entries = old_rxq_entries;
- efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
- goto out;
-}
-
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
-{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
-}
-
-static bool efx_default_channel_want_txqs(struct efx_channel *channel)
-{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
/**************************************************************************
*
* Port handling
*
**************************************************************************/
-/* This ensures that the kernel is kept informed (via
- * netif_carrier_on/off) of the link status, and also keeps the
- * stop state of the port's TX queue in sync with the link.
- */
-void efx_link_status_changed(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
-
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
- * that no events are triggered between unregister_netdev() and the
- * driver unloading. A more general condition is that NETDEV_CHANGE
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
- if (!netif_running(efx->net_dev))
- return;
-
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
- efx->n_link_state_changes++;
-
- if (link_state->up)
- netif_carrier_on(efx->net_dev);
- else
- netif_carrier_off(efx->net_dev);
- }
-
- /* Status message for kernel log */
- if (link_state->up)
- netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu);
- else
- netif_info(efx, link, efx->net_dev, "link down\n");
-}
-
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising)
-{
- memcpy(efx->link_advertising, advertising,
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
-
- efx->link_advertising[0] |= ADVERTISED_Autoneg;
- if (advertising[0] & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising[0] & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
-}
-
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
* force the Autoneg bit on.
*/
@@ -1035,73 +160,6 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
-/* We assume that efx->type->reconfigure_mac will always try to sync RX
- * filters and therefore needs to read-lock the filter table against freeing
- */
-void efx_mac_reconfigure(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->reconfigure_mac(efx);
- up_read(&efx->filter_sem);
-}
-
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
- * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
- * through efx_monitor().
- *
- * Callers must hold the mac_lock
- */
-int __efx_reconfigure_port(struct efx_nic *efx)
-{
- enum efx_phy_mode phy_mode;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Disable PHY transmit in mac level loopbacks */
- phy_mode = efx->phy_mode;
- if (LOOPBACK_INTERNAL(efx))
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
- else
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-
- rc = efx->type->reconfigure_port(efx);
-
- if (rc)
- efx->phy_mode = phy_mode;
-
- return rc;
-}
-
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled. */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Asynchronous work item for changing MAC promiscuity and multicast
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
- * MAC directly. */
-static void efx_mac_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
-
- mutex_lock(&efx->mac_lock);
- if (efx->port_enabled)
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-}
-
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
@@ -1155,44 +213,6 @@ fail1:
return rc;
}
-static void efx_start_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
- BUG_ON(efx->port_enabled);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = true;
-
- /* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx);
-
- mutex_unlock(&efx->mac_lock);
-}
-
-/* Cancel work for MAC reconfiguration, periodic hardware monitoring
- * and the async self-test, wait for them to finish and prevent them
- * being scheduled again. This doesn't cover online resets, which
- * should only be cancelled when removing the device.
- */
-static void efx_stop_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = false;
- mutex_unlock(&efx->mac_lock);
-
- /* Serialise against efx_set_multicast_list() */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- cancel_work_sync(&efx->mac_work);
-}
-
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
@@ -1291,582 +311,6 @@ static void efx_dissociate(struct efx_nic *efx)
}
}
-/* This configures the PCI device to enable I/O and DMA. */
-static int efx_init_io(struct efx_nic *efx)
-{
- struct pci_dev *pci_dev = efx->pci_dev;
- dma_addr_t dma_mask = efx->type->max_dma_mask;
- unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc, bar;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
-
- bar = efx->type->mem_bar(efx);
-
- rc = pci_enable_device(pci_dev);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
- goto fail1;
- }
-
- pci_set_master(pci_dev);
-
- /* Set the PCI DMA mask. Try all possibilities from our genuine mask
- * down to 32 bits, because some architectures will allow 40 bit
- * masks even though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
- goto fail2;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
-
- efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
- rc = pci_request_region(pci_dev, bar, "sfc");
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
- rc = -EIO;
- goto fail3;
- }
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
- if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys, mem_map_size);
- rc = -ENOMEM;
- goto fail4;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
-
- return 0;
-
- fail4:
- pci_release_region(efx->pci_dev, bar);
- fail3:
- efx->membase_phys = 0;
- fail2:
- pci_disable_device(efx->pci_dev);
- fail1:
- return rc;
-}
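
The mask-halving walk in efx_init_io() can be reproduced standalone. This is a sketch under invented constraints: mock_dma_set_mask(), pretending the platform caps DMA addressing at 40 bits, stands in for dma_set_mask_and_coherent().

#include <stdint.h>
#include <stdio.h>

static int mock_dma_set_mask(uint64_t mask)
{
	/* Pretend the platform accepts at most 40-bit DMA addressing. */
	return mask <= (1ULL << 40) - 1 ? 0 : -5 /* -EIO */;
}

int main(void)
{
	uint64_t dma_mask = (1ULL << 46) - 1;	/* hypothetical genuine mask */
	int rc = -5;

	/* Halve the mask until the platform accepts it, as above. */
	while (dma_mask > 0x7fffffffUL) {
		rc = mock_dma_set_mask(dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	printf("settled on mask %#llx (rc=%d)\n",
	       (unsigned long long)dma_mask, rc);
	return rc ? 1 : 0;
}
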
-
-static void efx_fini_io(struct efx_nic *efx)
-{
- int bar;
-
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
-
- if (efx->membase) {
- iounmap(efx->membase);
- efx->membase = NULL;
- }
-
- if (efx->membase_phys) {
- bar = efx->type->mem_bar(efx);
- pci_release_region(efx->pci_dev, bar);
- efx->membase_phys = 0;
- }
-
- /* Don't disable bus-mastering if VFs are assigned */
- if (!pci_vfs_assigned(efx->pci_dev))
- pci_disable_device(efx->pci_dev);
-}
-
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-}
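
In the kernel, ethtool_rxfh_indir_default(i, n) reduces to i % n, so the default table simply round-robins hash buckets across the RSS spread. A toy illustration (table size and spread invented):

#include <stdio.h>

int main(void)
{
	unsigned int table[8], i, rss_spread = 3;

	for (i = 0; i < 8; i++)
		table[i] = i % rss_spread;	/* 0 1 2 0 1 2 0 1 */
	for (i = 0; i < 8; i++)
		printf("%u ", table[i]);
	printf("\n");
	return 0;
}
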
-
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
-{
- cpumask_var_t thread_mask;
- unsigned int count;
- int cpu;
-
- if (rss_cpus) {
- count = rss_cpus;
- } else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
-
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
- }
-
- if (count > EFX_MAX_RX_QUEUES) {
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
- "Reducing number of rx queues from %u to %u.\n",
- count, EFX_MAX_RX_QUEUES);
- count = EFX_MAX_RX_QUEUES;
- }
-
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
- * table entries that are inaccessible to VFs
- */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
- }
- }
-#endif
-
- return count;
-}
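
The sibling-mask walk above counts one channel per physical core rather than per logical CPU. A self-contained sketch with a made-up 8-CPU, 2-way-SMT topology:

#include <stdio.h>

int main(void)
{
	/* sibling[cpu] = bitmask of hyperthreads sharing cpu's core
	 * (invented topology: CPUs pair up 0/1, 2/3, 4/5, 6/7).
	 */
	unsigned int sibling[8] = { 0x03, 0x03, 0x0c, 0x0c,
				    0x30, 0x30, 0xc0, 0xc0 };
	unsigned int seen = 0, count = 0, cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		if (!(seen & (1u << cpu))) {
			count++;		/* first thread of a core */
			seen |= sibling[cpu];	/* skip its siblings */
		}
	}
	printf("%u channels for 8 CPUs\n", count);	/* prints 4 */
	return 0;
}
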
-
-static int efx_allocate_msix_channels(struct efx_nic *efx,
- unsigned int max_channels,
- unsigned int extra_channels,
- unsigned int parallelism)
-{
- unsigned int n_channels = parallelism;
- int vec_count;
- int n_xdp_tx;
- int n_xdp_ev;
-
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
-
- /* To allow XDP transmit to happen from arbitrary NAPI contexts,
- * we allocate a TX queue per CPU. We share event queues across
- * multiple TX queues, assuming TX and event queues are both
- * maximum size.
- */
-
- n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
-
- vec_count = pci_msix_vec_count(efx->pci_dev);
- if (vec_count < 0)
- return vec_count;
-
- max_channels = min_t(unsigned int, vec_count, max_channels);
-
- /* Check resources.
- * We need a channel per event queue, plus a VI per tx queue.
- * This may be more pessimistic than it needs to be.
- */
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
- } else {
- efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
- efx->xdp_tx_queue_count = n_xdp_tx;
- n_channels += n_xdp_ev;
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
- }
-
- if (vec_count < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
- vec_count, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = vec_count;
- }
-
- n_channels = min(n_channels, max_channels);
-
- efx->n_channels = n_channels;
-
- /* Ignore XDP tx channels when creating rx channels. */
- n_channels -= efx->n_xdp_channels;
-
- if (efx_separate_tx_channels) {
- efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
- efx->tx_channel_offset =
- n_channels - efx->n_tx_channels;
- efx->n_rx_channels =
- max(n_channels -
- efx->n_tx_channels, 1U);
- } else {
- efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
- efx->tx_channel_offset = 0;
- efx->n_rx_channels = n_channels;
- }
-
- efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
- efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
-
- efx->xdp_channel_offset = n_channels;
-
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %u RX channels\n",
- efx->n_rx_channels);
-
- return efx->n_channels;
-}
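
Worked numbers for the XDP sizing above, assuming 12 possible CPUs and a hypothetical EFX_TXQ_TYPES of 4: 12 XDP TX queues packed onto ceil(12/4) = 3 event queues.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int n_xdp_tx = 12;	/* num_possible_cpus(), say */
	unsigned int txq_types = 4;	/* hypothetical EFX_TXQ_TYPES */

	printf("%u XDP event queues\n", DIV_ROUND_UP(n_xdp_tx, txq_types));
	return 0;
}
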
-
-/* Probe the number and type of interrupts we are able to obtain, and
- * the resulting numbers of channels and RX queues.
- */
-static int efx_probe_interrupts(struct efx_nic *efx)
-{
- unsigned int extra_channels = 0;
- unsigned int rss_spread;
- unsigned int i, j;
- int rc;
-
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
- if (efx->extra_channel_type[i])
- ++extra_channels;
-
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- unsigned int parallelism = efx_wanted_parallelism(efx);
- struct msix_entry xentries[EFX_MAX_CHANNELS];
- unsigned int n_channels;
-
- rc = efx_allocate_msix_channels(efx, efx->max_channels,
- extra_channels, parallelism);
- if (rc >= 0) {
- n_channels = rc;
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
- n_channels);
- }
- if (rc < 0) {
- /* Fall back to single channel MSI */
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- else
- return rc;
- } else if (rc < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors"
- " available (%d < %u).\n", rc, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = rc;
- }
-
- if (rc > 0) {
- for (i = 0; i < efx->n_channels; i++)
- efx_get_channel(efx, i)->irq =
- xentries[i].vector;
- }
- }
-
- /* Try single interrupt MSI */
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_channels = 1;
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- rc = pci_enable_msi(efx->pci_dev);
- if (rc == 0) {
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
- } else {
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
- else
- return rc;
- }
- }
-
- /* Assume legacy interrupts */
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- efx->legacy_irq = efx->pci_dev->irq;
- }
-
- /* Assign extra channels if possible, before XDP channels */
- efx->n_extra_tx_channels = 0;
- j = efx->xdp_channel_offset;
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
- if (!efx->extra_channel_type[i])
- continue;
- if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
- efx->extra_channel_type[i]->handle_no_channel(efx);
- } else {
- --j;
- efx_get_channel(efx, j)->type =
- efx->extra_channel_type[i];
- if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
- efx->n_extra_tx_channels++;
- }
- }
-
- rss_spread = efx->n_rx_channels;
- /* RSS might be usable on VFs even if it is disabled on the PF */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- efx->rss_spread = ((rss_spread > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- rss_spread : efx_vf_size(efx));
- return 0;
- }
-#endif
- efx->rss_spread = rss_spread;
-
- return 0;
-}
-
-#if defined(CONFIG_SMP)
-static void efx_set_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- unsigned int cpu;
-
- efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
- irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
- }
-}
-
-static void efx_clear_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- irq_set_affinity_hint(channel->irq, NULL);
-}
-#else
-static void
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-
-static void
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-#endif /* CONFIG_SMP */
-
-static int efx_soft_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- efx->irq_soft_enabled = true;
- smp_wmb();
-
- efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- efx_start_eventq(channel);
- }
-
- efx_mcdi_mode_event(efx);
-
- return 0;
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- return rc;
-}
-
-static void efx_soft_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- if (efx->state == STATE_DISABLED)
- return;
-
- efx_mcdi_mode_poll(efx);
-
- efx->irq_soft_enabled = false;
- smp_wmb();
-
- if (efx->legacy_irq)
- synchronize_irq(efx->legacy_irq);
-
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- /* Flush the asynchronous MCDI request queue */
- efx_mcdi_flush_async(efx);
-}
-
-static int efx_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
-
- efx->type->irq_enable_master(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- }
-
- rc = efx_soft_enable_interrupts(efx);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-
- return rc;
-}
-
-static void efx_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_soft_disable_interrupts(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-}
-
-static void efx_remove_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- /* Remove MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- channel->irq = 0;
- pci_disable_msi(efx->pci_dev);
- pci_disable_msix(efx->pci_dev);
-
- /* Remove legacy interrupt */
- efx->legacy_irq = 0;
-}
-
-static int efx_set_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- int xdp_queue_number;
-
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
- if (efx->xdp_tx_queue_count) {
- EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
-
- /* Allocate array for XDP TX queue lookup. */
- efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
- sizeof(*efx->xdp_tx_queues),
- GFP_KERNEL);
- if (!efx->xdp_tx_queues)
- return -ENOMEM;
- }
-
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
- efx_for_each_channel(channel, efx) {
- if (channel->channel < efx->n_rx_channels)
- channel->rx_queue.core_index = channel->channel;
- else
- channel->rx_queue.core_index = -1;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue -= (efx->tx_channel_offset *
- EFX_TXQ_TYPES);
-
- if (efx_channel_is_xdp_tx(channel) &&
- xdp_queue_number < efx->xdp_tx_queue_count) {
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
- }
- }
- }
- return 0;
-}
-
static int efx_probe_nic(struct efx_nic *efx)
{
int rc;
@@ -1939,70 +383,6 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
-static int efx_probe_filters(struct efx_nic *efx)
-{
- int rc;
-
- init_rwsem(&efx->filter_sem);
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- rc = efx->type->filter_table_probe(efx);
- if (rc)
- goto out_unlock;
-
-#ifdef CONFIG_RFS_ACCEL
- if (efx->type->offload_features & NETIF_F_NTUPLE) {
- struct efx_channel *channel;
- int i, success = 1;
-
- efx_for_each_channel(channel, efx) {
- channel->rps_flow_id =
- kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*channel->rps_flow_id),
- GFP_KERNEL);
- if (!channel->rps_flow_id)
- success = 0;
- else
- for (i = 0;
- i < efx->type->max_rx_ip_filters;
- ++i)
- channel->rps_flow_id[i] =
- RPS_FLOW_ID_INVALID;
- channel->rfs_expire_index = 0;
- channel->rfs_filter_count = 0;
- }
-
- if (!success) {
- efx_for_each_channel(channel, efx)
- kfree(channel->rps_flow_id);
- efx->type->filter_table_remove(efx);
- rc = -ENOMEM;
- goto out_unlock;
- }
- }
-#endif
-out_unlock:
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
- return rc;
-}
-
-static void efx_remove_filters(struct efx_nic *efx)
-{
-#ifdef CONFIG_RFS_ACCEL
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- cancel_delayed_work_sync(&channel->filter_work);
- kfree(channel->rps_flow_id);
- }
-#endif
- down_write(&efx->filter_sem);
- efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
-}
-
-
/**************************************************************************
*
* NIC startup/shutdown
@@ -2067,81 +447,6 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* If the interface is supposed to be running but is not, start
- * the hardware and software data path, regular activity for the port
- * (MAC statistics, link polling, etc.) and schedule the port to be
- * reconfigured. Interrupts must already be enabled. This function
- * is safe to call multiple times, so long as the NIC is not disabled.
- * Requires the RTNL lock.
- */
-static void efx_start_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->state == STATE_DISABLED);
-
- /* Check that it is appropriate to restart the interface. All
- * of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev) ||
- efx->reset_pending)
- return;
-
- efx_start_port(efx);
- efx_start_datapath(efx);
-
- /* Start the hardware monitor if there is one */
- if (efx->type->monitor != NULL)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-
- /* Link state detection is normally event-driven; we have
- * to poll now because we could have missed a change
- */
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx->type->start_stats(efx);
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
-}
-
-/* Quiesce the hardware and software data path, and regular activity
- * for the port without bringing the link down. Safe to call multiple
- * times with the NIC in almost any state, but interrupts should be
- * enabled. Requires the RTNL lock.
- */
-static void efx_stop_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- /* port_enabled can be read safely under the rtnl lock */
- if (!efx->port_enabled)
- return;
-
- /* update stats before we go down so we can accurately count
- * rx_nodesc_drops
- */
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
- efx->type->stop_stats(efx);
- efx_stop_port(efx);
-
- /* Stop the kernel transmit interface. This is only valid if
- * the device is stopped or detached; otherwise the watchdog
- * may fire immediately.
- */
- WARN_ON(netif_running(efx->net_dev) &&
- netif_device_present(efx->net_dev));
- netif_tx_disable(efx->net_dev);
-
- efx_stop_datapath(efx);
-}
-
static void efx_remove_all(struct efx_nic *efx)
{
rtnl_lock();
@@ -2237,36 +542,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * Hardware monitor
- *
- **************************************************************************/
-
-/* Run periodically off the general workqueue */
-static void efx_monitor(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-
- netif_vdbg(efx, timer, efx->net_dev,
- "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
- BUG_ON(efx->type->monitor == NULL);
-
- /* If the mac_lock is already held then a port reconfiguration is
- * probably in progress, which will do most of the work of
- * monitor() anyway. */
- if (mutex_trylock(&efx->mac_lock)) {
- if (efx->port_enabled)
- efx->type->monitor(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-}
-
-/**************************************************************************
- *
* ioctls
*
*************************************************************************/
@@ -2294,45 +569,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/**************************************************************************
*
- * NAPI interface
- *
- **************************************************************************/
-
-static void efx_init_napi_channel(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
-}
-
-static void efx_init_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_init_napi_channel(channel);
-}
-
-static void efx_fini_napi_channel(struct efx_channel *channel)
-{
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
-
- channel->napi_dev = NULL;
-}
-
-static void efx_fini_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_fini_napi_channel(channel);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2382,19 +618,8 @@ int efx_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static void efx_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, stats);
- spin_unlock_bh(&efx->stats_lock);
-}
-
/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev)
+static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2405,51 +630,6 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
-static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
-{
- /* The maximum MTU that we can fit in a single page, allowing for
- * framing, overhead and XDP headroom.
- */
- int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
- efx->rx_prefix_size + efx->type->rx_buffer_padding +
- efx->rx_ip_align + XDP_PACKET_HEADROOM;
-
- return PAGE_SIZE - overhead;
-}
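
A back-of-envelope version of the bound computed above; every overhead constant here is invented for illustration, since the real values depend on the NIC type and build configuration.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int frame_overhead = 18 + 4;	/* eth header + FCS, say */
	unsigned int rx_prefix = 14, padding = 16, ip_align = 2;
	unsigned int xdp_headroom = 256, page_state = 16;

	/* Everything that must fit in the page besides the payload. */
	printf("max XDP MTU: %u\n",
	       page_size - (frame_overhead + page_state + rx_prefix +
			    padding + ip_align + xdp_headroom));
	return 0;
}
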
-
-/* Context: process, rtnl_lock() held. */
-static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- if (rtnl_dereference(efx->xdp_prog) &&
- new_mtu > efx_xdp_max_mtu(efx)) {
- netif_err(efx, drv, efx->net_dev,
- "Requested MTU of %d too big for XDP (max: %d)\n",
- new_mtu, efx_xdp_max_mtu(efx));
- return -EINVAL;
- }
-
- netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
-
- mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- return 0;
-}
-
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2726,28 +906,6 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
-#ifdef CONFIG_SFC_MCDI_LOGGING
-static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
-}
-static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- bool enable = count > 0 && *buf != '0';
-
- mcdi->logging_enabled = enable;
- return count;
-}
-static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
-#endif
-
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2807,21 +965,11 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
-#ifdef CONFIG_SFC_MCDI_LOGGING
- rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to init net dev attributes\n");
- goto fail_attr_mcdi_logging;
- }
-#endif
+
+ efx_init_mcdi_logging(efx);
return 0;
-#ifdef CONFIG_SFC_MCDI_LOGGING
-fail_attr_mcdi_logging:
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2842,9 +990,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-#ifdef CONFIG_SFC_MCDI_LOGGING
- device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
-#endif
+ efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
@@ -2852,292 +998,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/**************************************************************************
*
- * Device reset and suspend
- *
- **************************************************************************/
-
-/* Tears down the entire software state and most of the hardware state
- * before reset. */
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->prepare_flr(efx);
-
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
-
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
- efx->type->fini(efx);
-}
-
-/* This function will always ensure that the locks acquired in
- * efx_reset_down() are released. A failure return code indicates
- * that we were unable to reinitialise the hardware, and the
- * driver should be disabled. If ok is false, then the rx and tx
- * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->finish_flr(efx);
-
- /* Ensure that SRAM is initialised even if we're disabling the device */
- rc = efx->type->init(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
- goto fail;
- }
-
- if (!ok)
- goto fail;
-
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
- if (rc && rc != -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "could not restore PHY settings\n");
- }
-
- rc = efx_enable_interrupts(efx);
- if (rc)
- goto fail;
-
-#ifdef CONFIG_SFC_SRIOV
- rc = efx->type->vswitching_restore(efx);
- if (rc) /* not fatal; the PF will still work fine */
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore vswitching rc=%d;"
- " VFs may not function\n", rc);
-#endif
-
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
- efx->type->filter_table_restore(efx);
- up_write(&efx->filter_sem);
- if (efx->type->sriov_reset)
- efx->type->sriov_reset(efx);
-
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
-
- if (efx->type->udp_tnl_push_ports)
- efx->type->udp_tnl_push_ports(efx);
-
- return 0;
-
-fail:
- efx->port_initialized = false;
-
- mutex_unlock(&efx->rss_lock);
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Reset the NIC using the specified method. Note that the reset may
- * fail, in which case the card will be left in an unusable state.
- *
- * Caller must hold the rtnl_lock.
- */
-int efx_reset(struct efx_nic *efx, enum reset_type method)
-{
- int rc, rc2;
- bool disabled;
-
- netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
- RESET_TYPE(method));
-
- efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
-
- rc = efx->type->reset(efx, method);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
- goto out;
- }
-
- /* Clear flags for the scopes we covered. We assume the NIC and
- * driver are now quiescent so that there is no race here.
- */
- if (method < RESET_TYPE_MAX_METHOD)
- efx->reset_pending &= -(1 << (method + 1));
- else /* it doesn't fit into the well-ordered scope hierarchy */
- __clear_bit(method, &efx->reset_pending);
-
- /* Reinitialise bus-mastering, which may have been turned off before
- * the reset was scheduled. This is still appropriate, even in the
- * RESET_TYPE_DISABLE case, since this driver generally assumes the
- * hardware can respond to requests. */
- pci_set_master(efx->pci_dev);
-
-out:
- /* Leave device stopped if necessary */
- disabled = rc ||
- method == RESET_TYPE_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
- if (rc2) {
- disabled = true;
- if (!rc)
- rc = rc2;
- }
-
- if (disabled) {
- dev_close(efx->net_dev);
- netif_err(efx, drv, efx->net_dev, "has been disabled\n");
- efx->state = STATE_DISABLED;
- } else {
- netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-}
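
The expression -(1 << (method + 1)) used above is a two's-complement trick: it yields a mask whose bits method and below are clear, so a single AND drops every scope the reset covered. In isolation (bit assignments invented):

#include <stdio.h>

int main(void)
{
	unsigned long pending = 0x2b;	/* some pending reset types */
	int method = 3;			/* scope we just handled */

	/* -(1UL << 4) == ...fffff0: clears bits 0 through method. */
	pending &= -(1UL << (method + 1));
	printf("pending after clear: %#lx\n", pending);	/* 0x20 */
	return 0;
}
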
-
-/* Try recovery mechanisms.
- * For now only EEH is supported.
- * Returns 0 if the recovery mechanisms are unsuccessful.
- * Returns a non-zero value otherwise.
- */
-int efx_try_recovery(struct efx_nic *efx)
-{
-#ifdef CONFIG_EEH
- /* A PCI error can occur and not be seen by EEH because nothing
- * happens on the PCI bus. In this case the driver may fail and
- * schedule a 'recover or reset', leading to this recovery handler.
- * Manually call the eeh failure check function.
- */
- struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
- if (eeh_dev_check_failure(eehdev)) {
- /* The EEH mechanisms will handle the error and reset the
- * device if necessary.
- */
- return 1;
- }
-#endif
- return 0;
-}
-
-static void efx_wait_for_bist_end(struct efx_nic *efx)
-{
- int i;
-
- for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
- if (efx_mcdi_poll_reboot(efx))
- goto out;
- msleep(BIST_WAIT_DELAY_MS);
- }
-
- netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
-out:
- /* Either way unset the BIST flag. If we found no reboot we probably
- * won't recover, but we should try.
- */
- efx->mc_bist_for_other_fn = false;
-}
-
-/* The worker thread exists so that code that cannot sleep can
- * schedule a reset for later.
- */
-static void efx_reset_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending;
- enum reset_type method;
-
- pending = READ_ONCE(efx->reset_pending);
- method = fls(pending) - 1;
-
- if (method == RESET_TYPE_MC_BIST)
- efx_wait_for_bist_end(efx);
-
- if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_ALL) &&
- efx_try_recovery(efx))
- return;
-
- if (!pending)
- return;
-
- rtnl_lock();
-
- /* We checked the state in efx_schedule_reset() but it may
- * have changed by now. Now that we have the RTNL lock,
- * it cannot change again.
- */
- if (efx->state == STATE_READY)
- (void)efx_reset(efx, method);
-
- rtnl_unlock();
-}
-
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
-{
- enum reset_type method;
-
- if (efx->state == STATE_RECOVERY) {
- netif_dbg(efx, drv, efx->net_dev,
- "recovering: skip scheduling %s reset\n",
- RESET_TYPE(type));
- return;
- }
-
- switch (type) {
- case RESET_TYPE_INVISIBLE:
- case RESET_TYPE_ALL:
- case RESET_TYPE_RECOVER_OR_ALL:
- case RESET_TYPE_WORLD:
- case RESET_TYPE_DISABLE:
- case RESET_TYPE_RECOVER_OR_DISABLE:
- case RESET_TYPE_DATAPATH:
- case RESET_TYPE_MC_BIST:
- case RESET_TYPE_MCDI_TIMEOUT:
- method = type;
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
- RESET_TYPE(method));
- break;
- default:
- method = efx->type->map_reset_reason(type);
- netif_dbg(efx, drv, efx->net_dev,
- "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
- break;
- }
-
- set_bit(method, &efx->reset_pending);
- smp_mb(); /* ensure we change reset_pending before checking state */
-
- /* If we're not READY then just leave the flags set as the cue
- * to abort probing or reschedule the reset later.
- */
- if (READ_ONCE(efx->state) != STATE_READY)
- return;
-
- /* efx_process_channel() will no longer read events once a
- * reset is scheduled. So switch back to poll'd MCDI completions. */
- efx_mcdi_mode_poll(efx);
-
- queue_work(reset_workqueue, &efx->reset_work);
-}
-
-/**************************************************************************
- *
* List of NICs we support
*
**************************************************************************/
@@ -3169,139 +1029,10 @@ static const struct pci_device_id efx_pci_table[] = {
/**************************************************************************
*
- * Dummy PHY/MAC operations
- *
- * Can be used for some unimplemented operations
- * Needed so all function pointers are valid and do not have to be tested
- * before use
- *
- **************************************************************************/
-int efx_port_dummy_op_int(struct efx_nic *efx)
-{
- return 0;
-}
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
-
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
-/**************************************************************************
- *
* Data housekeeping
*
**************************************************************************/
-/* This zeroes out and then fills in the invariants in a struct
- * efx_nic (including all sub-structures).
- */
-static int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
-{
- int rc = -ENOMEM, i;
-
- /* Initialise common structures */
- INIT_LIST_HEAD(&efx->node);
- INIT_LIST_HEAD(&efx->secondary_list);
- spin_lock_init(&efx->biu_lock);
-#ifdef CONFIG_SFC_MTD
- INIT_LIST_HEAD(&efx->mtd_list);
-#endif
- INIT_WORK(&efx->reset_work, efx_reset_work);
- INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
- INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
- efx->pci_dev = pci_dev;
- efx->msg_enable = debug;
- efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
-
- efx->net_dev = net_dev;
- efx->rx_prefix_size = efx->type->rx_prefix_size;
- efx->rx_ip_align =
- NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
- efx->rx_packet_hash_offset =
- efx->type->rx_hash_offset - efx->type->rx_prefix_size;
- efx->rx_packet_ts_offset =
- efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- mutex_init(&efx->rss_lock);
- spin_lock_init(&efx->stats_lock);
- efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
- efx->num_mac_stats = MC_CMD_MAC_NSTATS;
- BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
- mutex_init(&efx->mac_lock);
-#ifdef CONFIG_RFS_ACCEL
- mutex_init(&efx->rps_mutex);
- spin_lock_init(&efx->rps_hash_lock);
- /* Failure to allocate is not fatal, but may degrade ARFS performance */
- efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
- sizeof(*efx->rps_hash_table), GFP_KERNEL);
-#endif
- efx->phy_op = &efx_dummy_phy_operations;
- efx->mdio.dev = net_dev;
- INIT_WORK(&efx->mac_work, efx_mac_work);
- init_waitqueue_head(&efx->flush_wq);
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
- if (!efx->channel[i])
- goto fail;
- efx->msi_context[i].efx = efx;
- efx->msi_context[i].index = i;
- }
-
- /* Higher numbered interrupt modes are less capable! */
- if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
- efx->type->min_interrupt_mode)) {
- rc = -EIO;
- goto fail;
- }
- efx->interrupt_mode = max(efx->type->max_interrupt_mode,
- interrupt_mode);
- efx->interrupt_mode = min(efx->type->min_interrupt_mode,
- interrupt_mode);
-
- /* Would be good to use the net_dev name, but we're too early */
- snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
- pci_name(pci_dev));
- efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
- if (!efx->workqueue)
- goto fail;
-
- return 0;
-
-fail:
- efx_fini_struct(efx);
- return rc;
-}
-
-static void efx_fini_struct(struct efx_nic *efx)
-{
- int i;
-
-#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_hash_table);
-#endif
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
- kfree(efx->channel[i]);
-
- kfree(efx->vpd_sn);
-
- if (efx->workqueue) {
- destroy_workqueue(efx->workqueue);
- efx->workqueue = NULL;
- }
-}
-
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
u64 n_rx_nodesc_trunc = 0;
@@ -3313,197 +1044,6 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if ((left->match_flags ^ right->match_flags) |
- ((left->flags ^ right->flags) &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
- return false;
-
- return memcmp(&left->outer_vid, &right->outer_vid,
- sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
- (sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
- 0);
-}
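
Both helpers above treat everything from outer_vid to the end of the spec as one opaque blob. A userspace sketch of that pattern, with a cut-down stand-in struct and FNV-1a in place of jhash2() (both are assumptions for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct spec {
	uint32_t match_flags;
	uint32_t flags;
	uint32_t outer_vid;	/* hash covers from here to the end */
	uint32_t ip[4];
};

static uint32_t hash_tail(const struct spec *s)
{
	const uint8_t *p = (const uint8_t *)&s->outer_vid;
	size_t n = sizeof(*s) - offsetof(struct spec, outer_vid);
	uint32_t h = 2166136261u;	/* FNV-1a, standing in for jhash2 */

	while (n--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

int main(void)
{
	struct spec s = { .outer_vid = 100 };

	printf("%#x\n", hash_tail(&s));
	return 0;
}
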
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force)
-{
- if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
- /* ARFS is currently updating this entry, leave it */
- return false;
- }
- if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
- /* ARFS tried and failed to update this, so it's probably out
- * of date. Remove the filter and the ARFS rule entry.
- */
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
- *force = true;
- return true;
- } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
- /* ARFS has moved on, so old filter is not needed. Since we did
- * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
- * not be removed by efx_rps_hash_del() subsequently.
- */
- *force = true;
- return true;
- }
- /* Remove it iff ARFS wants to. */
- return true;
-}
-
-static
-struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- u32 hash = efx_filter_spec_hash(spec);
-
- lockdep_assert_held(&efx->rps_hash_lock);
- if (!efx->rps_hash_table)
- return NULL;
- return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
-}
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec))
- return rule;
- }
- return NULL;
-}
-
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- *new = false;
- return rule;
- }
- }
- rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
- *new = true;
- if (rule) {
- memcpy(&rule->spec, spec, sizeof(rule->spec));
- hlist_add_head(&rule->node, head);
- }
- return rule;
-}
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (WARN_ON(!head))
- return;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- /* Someone already reused the entry. We know that if
- * this check doesn't fire (i.e. filter_id == REMOVING)
- * then the REMOVING mark was put there by our caller,
- * because caller is holding a lock on filter table and
- * only holders of that lock set REMOVING.
- */
- if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
- return;
- hlist_del(node);
- kfree(rule);
- return;
- }
- }
- /* We didn't find it. */
- WARN_ON(1);
-}
-#endif
-
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
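
The allocation loop above is a first-gap search over user IDs starting at 1. The same logic over a plain array (existing IDs invented):

#include <stdio.h>

int main(void)
{
	unsigned int used[] = { 1, 2, 4, 5 };	/* existing user_ids */
	unsigned int i, id = 1;

	/* Take the first ID not already in the sorted list. */
	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		if (used[i] != id)
			break;
		id++;
	}
	printf("allocated user_id %u\n", id);	/* prints 3 */
	return 0;
}
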
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
/**************************************************************************
*
* PCI interface
@@ -3519,7 +1059,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
- cancel_work_sync(&efx->reset_work);
+ efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx);
@@ -3559,7 +1099,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
@@ -3782,7 +1322,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
- rc = efx_init_io(efx);
+ rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
if (rc)
goto fail2;
@@ -3826,7 +1367,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return 0;
fail3:
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
fail2:
efx_fini_struct(efx);
fail1:
@@ -3904,7 +1445,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
- queue_work(reset_workqueue, &efx->reset_work);
+ efx_queue_reset_work(efx);
return 0;
@@ -4083,10 +1624,6 @@ static struct pci_driver efx_pci_driver = {
*
*************************************************************************/
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
- "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
-
static int __init efx_init_module(void)
{
int rc;
@@ -4103,11 +1640,9 @@ static int __init efx_init_module(void)
goto err_sriov;
#endif
- reset_workqueue = create_singlethread_workqueue("sfc_reset");
- if (!reset_workqueue) {
- rc = -ENOMEM;
+ rc = efx_create_reset_workqueue();
+ if (rc)
goto err_reset;
- }
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
@@ -4116,7 +1651,7 @@ static int __init efx_init_module(void)
return 0;
err_pci:
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
err_reset:
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
@@ -4132,7 +1667,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
#endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2dd8d5002315..f1bdb04efbe4 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,31 +15,17 @@ int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
/* TX */
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
/* RX */
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
-void efx_rx_config_page_split(struct efx_nic *efx);
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
@@ -48,7 +34,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -80,8 +65,6 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
/* Filters */
-void efx_mac_reconfigure(struct efx_nic *efx);
-
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -186,58 +169,17 @@ static inline void efx_filter_rfs_expire(struct work_struct *data)
static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
-
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force);
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec);
-
-/* @new is written to indicate if entry was newly added (true) or if an old
- * entry was found and returned (false).
- */
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new);
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-#endif
/* RSS contexts */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
- return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
+ return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
-/* Channels */
-int efx_channel_dummy_op_int(struct efx_channel *channel);
-void efx_channel_dummy_op_void(struct efx_channel *channel);
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-
-/* Ports */
-int efx_reconfigure_port(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
-/* Reset handling */
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_try_recovery(struct efx_nic *efx);
-
/* Global */
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
@@ -245,8 +187,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
@@ -293,9 +233,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising);
void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
new file mode 100644
index 000000000000..aeb5e8aa2f2a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/***************
+ * Housekeeping
+ ***************/
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
+
+/*************
+ * INTERRUPTS
+ *************/
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_sibling_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+ }
+#endif
+
+ return count;
+}
+
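efx_wanted_parallelism() counts one CPU per group of hyperthread siblings, so two logical CPUs on the same core contribute a single RSS channel. A userspace sketch of that deduplication, with topology_sibling_cpumask() modelled as a plain bitmask per CPU (illustrative only, not driver code):

#include <stdio.h>

/* Count distinct sibling groups: visit CPUs in order and, whenever a
 * CPU is not yet covered, count it and mark its whole sibling mask.
 */
static unsigned int count_cores(const unsigned long *sibling, int ncpus)
{
	unsigned long seen = 0;
	unsigned int count = 0;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!(seen & (1UL << cpu))) {
			count++;
			seen |= sibling[cpu];
		}
	}
	return count;
}

int main(void)
{
	/* 4 logical CPUs, hyperthread pairs {0,1} and {2,3} */
	unsigned long sibling[4] = { 0x3, 0x3, 0xc, 0xc };

	printf("%u\n", count_cores(sibling, 4));	/* prints 2 */
	return 0;
}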
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+
+ max_channels = min_t(unsigned int, vec_count, max_channels);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ efx->n_channels = n_channels;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+ efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+ efx->xdp_channel_offset = n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
+
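To make the XDP sizing concrete: assuming 32 possible CPUs and EFX_TXQ_TYPES == 4 (its value in this era of the driver; both numbers are for illustration), the function asks for 32 XDP TX queues served by ceil(32 / 4) = 8 extra event queues, and grants them only if they fit under max_channels; otherwise XDP TX is disabled rather than failing the probe. A trivial standalone check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int n_cpus = 32;	/* stand-in for num_possible_cpus() */
	unsigned int txq_types = 4;	/* assumed EFX_TXQ_TYPES */

	printf("xdp tx queues: %u\n", n_cpus);
	printf("xdp event queues: %u\n", DIV_ROUND_UP(n_cpus, txq_types));
	return 0;
}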
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int rss_spread;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible, before XDP channels */
+ efx->n_extra_tx_channels = 0;
+ j = efx->xdp_channel_offset;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
+ }
+ }
+
+ rss_spread = efx->n_rx_channels;
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((rss_spread > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ rss_spread : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = rss_spread;
+
+ return 0;
+}
+
+#if defined(CONFIG_SMP)
+void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
+
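Worked through with the defaults visible in efx.h (EFX_DEFAULT_DMAQ_SIZE == 1024 for both rings), this sizing gives roundup_pow_of_two(1024 + 1024 + 128) = 4096 entries and an eventq_mask of 4095. A standalone check, with roundup_pow_of_two() reimplemented since the kernel macro is not available in userspace:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long rxq = 1024, txq = 1024;	/* EFX_DEFAULT_DMAQ_SIZE */
	unsigned long entries = roundup_pow_of_two(rxq + txq + 128);

	/* prints "evq entries: 4096 mask: 4095" */
	printf("evq entries: %lu mask: %lu\n", entries, entries - 1);
	return 0;
}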
+/* Prepare channel's event queue */
+int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
+void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+
+ return channel;
+}
+
+int efx_init_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ return -ENOMEM;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+ efx->type->min_interrupt_mode)) {
+ return -EIO;
+ }
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+				  efx->interrupt_mode);
+
+ return 0;
+}
+
+void efx_fini_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ if (efx->channel[i]) {
+ kfree(efx->channel[i]);
+ efx->channel[i] = NULL;
+ }
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ channel->rx_list = NULL;
+
+ return 0;
+
+fail:
+ efx_remove_channel(channel);
+ return rc;
+}
+
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (number < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
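Assuming a device named eth0 with tx_channel_offset == 4, xdp_channel_offset == 8 and XDP channels present (hypothetical numbers), the scheme above yields:

	channel 0 -> "eth0-rx-0"
	channel 4 -> "eth0-tx-0"
	channel 9 -> "eth0-xdp-1"

When TX and RX share every channel (tx_channel_offset == 0) the type suffix is dropped and channel 0 is simply "eth0-0".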
+void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
+}
+
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
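efx_realloc_channels() follows a clone/swap/rollback shape: build replacements beside the live objects, swap the pointers to commit, and on failure swap back so the out: path always frees whichever set lost. A reduced standalone sketch of the idiom (illustrative names only, none of this is driver API):

#include <stdlib.h>

struct obj { int cfg; };

/* Rebuild n objects with a new cfg; on any failure leave 'cur' exactly
 * as it was.
 */
static int rebuild_all(struct obj **cur, int n, int new_cfg)
{
	struct obj **fresh = calloc(n, sizeof(*fresh));
	int i;

	if (!fresh)
		return -1;
	for (i = 0; i < n; i++) {
		fresh[i] = malloc(sizeof(**fresh));
		if (!fresh[i])
			goto rollback;
		fresh[i]->cfg = new_cfg;
	}
	for (i = 0; i < n; i++) {	/* commit: swap the pointers */
		struct obj *tmp = cur[i];

		cur[i] = fresh[i];
		fresh[i] = tmp;
	}
	for (i = 0; i < n; i++)		/* free the displaced set */
		free(fresh[i]);
	free(fresh);
	return 0;

rollback:
	while (i--)
		free(fresh[i]);
	free(fresh);
	return -1;
}

int main(void)
{
	struct obj *cur[2] = { NULL, NULL };
	int rc = rebuild_all(cur, 2, 42);

	free(cur[0]);
	free(cur[1]);
	return rc ? 1 : 0;
}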
+int efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
+
+ efx->tx_channel_offset =
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
+
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ xdp_queue_number = 0;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
+ }
+ return 0;
+}
+
+bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ /* TODO: Is this really a bug? */
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_start_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+}
+
+void efx_stop_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ int rc = 0;
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
+ }
+
+ if (efx->type->fini_dmaq)
+ rc = efx->type->fini_dmaq(efx);
+
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ }
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl,
+ tx_queue->bytes_compl);
+ }
+ }
+
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
+ return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ xdp_do_flush_map();
+
+ if (spent < budget) {
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ efx_update_irq_mod(efx, channel);
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ return spent;
+}
+
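efx_poll() honours the standard NAPI contract: keep returning the full budget while work may remain, and call napi_complete_done() only after spending less than the budget, so event notifications are re-armed exactly once. A skeleton of that contract in kernel-module shape (not standalone; my_clean_ring() and my_enable_evq() are hypothetical stand-ins for a driver's ring cleanup and re-arm primitives):

#include <linux/netdevice.h>

static int my_clean_ring(struct napi_struct *napi, int budget);
static void my_enable_evq(struct napi_struct *napi);

/* Consume at most 'budget' events; re-arm only when we did NOT
 * exhaust the budget and NAPI agrees the poll is complete.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	int done = my_clean_ring(napi, budget);

	if (done < budget && napi_complete_done(napi, done))
		my_enable_evq(napi);
	return done;
}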
+void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+
+void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
new file mode 100644
index 000000000000..8d7b8c4142d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_CHANNELS_H
+#define EFX_CHANNELS_H
+
+int efx_probe_interrupts(struct efx_nic *efx);
+void efx_remove_interrupts(struct efx_nic *efx);
+int efx_soft_enable_interrupts(struct efx_nic *efx);
+void efx_soft_disable_interrupts(struct efx_nic *efx);
+int efx_enable_interrupts(struct efx_nic *efx);
+void efx_disable_interrupts(struct efx_nic *efx);
+
+void efx_set_interrupt_affinity(struct efx_nic *efx);
+void efx_clear_interrupt_affinity(struct efx_nic *efx);
+
+int efx_probe_eventq(struct efx_channel *channel);
+int efx_init_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_fini_eventq(struct efx_channel *channel);
+void efx_remove_eventq(struct efx_channel *channel);
+
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
+void efx_set_channel_names(struct efx_nic *efx);
+int efx_init_channels(struct efx_nic *efx);
+int efx_probe_channels(struct efx_nic *efx);
+int efx_set_channels(struct efx_nic *efx);
+bool efx_default_channel_want_txqs(struct efx_channel *channel);
+void efx_remove_channel(struct efx_channel *channel);
+void efx_remove_channels(struct efx_nic *efx);
+void efx_fini_channels(struct efx_nic *efx);
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
+void efx_start_channels(struct efx_nic *efx);
+void efx_stop_channels(struct efx_nic *efx);
+
+void efx_init_napi_channel(struct efx_channel *channel);
+void efx_init_napi(struct efx_nic *efx);
+void efx_fini_napi_channel(struct efx_channel *channel);
+void efx_fini_napi(struct efx_nic *efx);
+
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
new file mode 100644
index 000000000000..b0d76bc19673
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_create_reset_workqueue(void)
+{
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ printk(KERN_ERR "Failed to create reset workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void efx_queue_reset_work(struct efx_nic *efx)
+{
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_flush_reset_workqueue(struct efx_nic *efx)
+{
+ cancel_work_sync(&efx->reset_work);
+}
+
+void efx_destroy_reset_workqueue(void)
+{
+ if (reset_workqueue) {
+ destroy_workqueue(reset_workqueue);
+ reset_workqueue = NULL;
+ }
+}
+
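Taken together, these four helpers hide the module-global workqueue behind an interface. The intended pairing, matching the efx_init_module()/efx_exit_module() and efx_pci_remove_main() changes earlier in this patch, is:

	rc = efx_create_reset_workqueue();	/* module init */
	...
	efx_queue_reset_work(efx);		/* schedule a reset */
	...
	efx_flush_reset_workqueue(efx);		/* before NIC teardown */
	...
	efx_destroy_reset_workqueue();		/* module exit */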
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ if (efx->type->reconfigure_mac) {
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+ }
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status; netif_carrier_off() also
+ * keeps the port's TX queue stopped while the link is down.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+ */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
+
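Plugging illustrative numbers into the formula (every constant below is an assumption for the sketch; the real values come from the NIC type, struct layouts and architecture):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int frame_overhead = 36;	/* ~EFX_MAX_FRAME_LEN(0) */
	unsigned int page_state = 16;		/* sizeof rx_page_state */
	unsigned int prefix = 14, padding = 0, ip_align = 2;
	unsigned int xdp_headroom = 256;	/* XDP_PACKET_HEADROOM */

	/* with these numbers: 4096 - 324 = 3772 */
	printf("max XDP MTU ~ %u\n",
	       page_size - (frame_overhead + page_state + prefix +
			    padding + ip_align + xdp_headroom));
	return 0;
}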
+/* Context: process, rtnl_lock() held. */
+int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+ * reconfiguration is already in place, which will likely do
+ * most of the work of monitor() anyway.
+ */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled && efx->type->monitor)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx_start_monitor(efx);
+}
+
+void efx_start_monitor(struct efx_nic *efx)
+{
+ if (efx->type->monitor)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if ((efx->rx_scatter != old_rx_scatter) &&
+ efx->type->filter_update_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+ /* Initialise the channels */
+ efx_start_channels(efx);
+
+ efx_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
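As a worked example of the stop/wake thresholds set above: with the default 1024 TX descriptors and efx_tx_max_skb_descs() returning 64 (a made-up figure for illustration), the queue stops once 960 descriptors are in flight and wakes only when the fill level drops back to 480, giving the ring hysteresis instead of a stop/wake flap around a single threshold.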
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_ptp_stop_datapath(efx);
+
+ efx_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx_mac_reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock
+ */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ efx_start_monitor(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (efx->type->start_stats) {
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ }
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+void efx_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ if (efx->type->update_stats) {
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ }
+
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc = 0;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ if (efx->type->reconfigure_port)
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way, unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
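For scale (an assumption, since the constants are defined elsewhere in the driver): with BIST_WAIT_DELAY_COUNT == 100 and BIST_WAIT_DELAY_MS == 100, the bounded poll above behaves as follows:

	/* Worst case, under the assumed constants:
	 *   100 iterations * msleep(100) ~= 10 s before warning and
	 *   clearing mc_bist_for_other_fn anyway.
	 */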
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ mutex_lock(&efx->rss_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ if (efx->type->rx_restore_rss_contexts)
+ efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
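One point worth making explicit: efx_reset_down() and efx_reset_up() form a split critical section, so the unlocks here must mirror the acquisition order in efx_reset_down() in reverse, on both the success and fail paths:

	/* Acquired in efx_reset_down():    Released in efx_reset_up():
	 *   mutex_lock(&efx->mac_lock);      mutex_unlock(&efx->rss_lock);
	 *   down_write(&efx->filter_sem);    up_write(&efx->filter_sem);
	 *   mutex_lock(&efx->rss_lock);      mutex_unlock(&efx->mac_lock);
	 */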
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ bool disabled;
+ int rc, rc2;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE case, since this driver generally assumes the
+ * hardware can respond to requests.
+ */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+}
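The reset_pending update above leans on a two's-complement identity, -(1 << n) == ~((1 << n) - 1), i.e. a mask with bits n and above set. A worked example with an illustrative scope value:

	/* method = 2:
	 *   1 << (method + 1)      = 0b00001000
	 *   -(1 << (method + 1))   = 0b...11111000   (bits 3+ set)
	 *   reset_pending &= mask  -> scopes 0..2 cleared as covered,
	 *                             wider scopes (3+) left pending
	 */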
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = READ_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, method);
+
+ rtnl_unlock();
+}
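Since the reset_type enum orders scopes roughly by severity (an assumption this illustration relies on), fls() -- find last set, 1-based -- makes the worker service the widest pending scope first:

	/* pending = BIT(RESET_TYPE_INVISIBLE) | BIT(RESET_TYPE_ALL);
	 * method  = fls(pending) - 1;      // == RESET_TYPE_ALL
	 * efx_reset() then clears the narrower INVISIBLE bit too,
	 * via the reset_pending mask arithmetic shown earlier.
	 */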
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (READ_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+ * reset is scheduled, so switch back to polled MCDI completions.
+ */
+ efx_mcdi_mode_poll(efx);
+
+ efx_queue_reset_work(efx);
+}
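The smp_mb() pairs with the state transition on the probe path (the pairing side is assumed here): either this CPU sees STATE_READY and queues the work itself, or the CPU setting STATE_READY sees the bit just set and handles the reset. A sketch of the two orderings:

	/* CPU A (this function)           CPU B (probe path, assumed)
	 * set_bit(method, ...);           efx->state = STATE_READY;
	 * smp_mb();                       <matching barrier assumed>
	 * read state -> not READY?        read reset_pending -> sees bit
	 *   -> leave flag as a cue          -> reschedules the reset
	 */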
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+int efx_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int rc = -ENOMEM;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx_selftest_async_init(efx);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ INIT_LIST_HEAD(&efx->rss_context.list);
+ mutex_init(&efx->rss_lock);
+ spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return rc;
+}
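A worked example of the rx_ip_align arithmetic, with hypothetical values:

	/* Assuming NET_IP_ALIGN == 2 and rx_prefix_size == 16:
	 *   rx_ip_align = (16 + 2) % 4 = 2
	 *   (rx_ip_align + rx_prefix_size) % 4 == NET_IP_ALIGN
	 * so for even prefix sizes the packet data after the prefix
	 * keeps NET_IP_ALIGN alignment; where NET_IP_ALIGN == 0
	 * (e.g. x86), no offset is applied at all.
	 */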
+
+void efx_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
+ efx_fini_channels(efx);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40-bit
+ * masks even though they reject 46-bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ if (!efx->membase_phys) {
+ netif_err(efx, probe, efx->net_dev,
+ "ERROR: No BAR%d mapping from the BIOS. "
+ "Try pci=realloc on the kernel command line\n", bar);
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+fail4:
+ pci_release_region(efx->pci_dev, bar);
+fail3:
+ efx->membase_phys = 0;
+fail2:
+ pci_disable_device(efx->pci_dev);
+fail1:
+ return rc;
+}
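To make the fallback loop concrete, an illustrative degradation starting from a hypothetical 46-bit mask:

	/* dma_mask = DMA_BIT_MASK(46)   = 0x3fffffffffff   (46 bits)
	 * attempt fails -> dma_mask >>= 1 = 0x1fffffffffff  (45 bits)
	 * attempt fails -> dma_mask >>= 1 = 0x0fffffffffff  (44 bits)
	 * ...
	 * the loop exits once dma_mask <= 0x7fffffff, so nothing
	 * narrower than a 32-bit mask is ever attempted.
	 */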
+
+void efx_fini_io(struct efx_nic *efx, int bar)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, bar);
+ efx->membase_phys = 0;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+
+void efx_init_mcdi_logging(struct efx_nic *efx)
+{
+ int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ }
+}
+
+void efx_fini_mcdi_logging(struct efx_nic *efx)
+{
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+}
+#endif
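For reference, the attribute created above appears in the PCI device's sysfs directory; an illustrative interaction (the device address is hypothetical):

	/* # cat /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
	 * 0
	 * # echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
	 * set_mcdi_log() treats any non-empty write whose first
	 * character is not '0' as "enable".
	 */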
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
new file mode 100644
index 000000000000..fa2fc681e7f9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_fini_io(struct efx_nic *efx, int bar);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_fini_struct(struct efx_nic *efx);
+
+void efx_start_all(struct efx_nic *efx);
+void efx_stop_all(struct efx_nic *efx);
+
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
+
+int efx_create_reset_workqueue(void);
+void efx_queue_reset_work(struct efx_nic *efx);
+void efx_flush_reset_workqueue(struct efx_nic *efx);
+void efx_destroy_reset_workqueue(void);
+
+void efx_start_monitor(struct efx_nic *efx);
+
+int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+int efx_try_recovery(struct efx_nic *efx);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+void efx_init_mcdi_logging(struct efx_nic *efx);
+void efx_fini_mcdi_logging(struct efx_nic *efx);
+#else
+static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
+#endif
+
+void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_link_status_changed(struct efx_nic *efx);
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
+int efx_change_mtu(struct net_device *net_dev, int new_mtu);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index b31032da4bcb..993b5769525b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -13,92 +13,13 @@
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
#include "filter.h"
#include "nic.h"
-struct efx_sw_stat_desc {
- const char *name;
- enum {
- EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel,
- EFX_ETHTOOL_STAT_SOURCE_tx_queue
- } source;
- unsigned offset;
- u64(*get_stat) (void *field); /* Reader function */
-};
-
-/* Initialiser for a struct efx_sw_stat_desc with type-checking */
-#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
- get_stat_function) { \
- .name = #stat_name, \
- .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
- .offset = ((((field_type *) 0) == \
- &((struct efx_##source_name *)0)->field) ? \
- offsetof(struct efx_##source_name, field) : \
- offsetof(struct efx_##source_name, field)), \
- .get_stat = get_stat_function, \
-}
-
-static u64 efx_get_uint_stat(void *field)
-{
- return *(unsigned int *)field;
-}
-
-static u64 efx_get_atomic_stat(void *field)
-{
- return atomic_read((atomic_t *) field);
-}
-
-#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
- EFX_ETHTOOL_STAT(field, nic, field, \
- atomic_t, efx_get_atomic_stat)
-
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
- EFX_ETHTOOL_STAT(field, channel, n_##field, \
- unsigned int, efx_get_uint_stat)
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
- EFX_ETHTOOL_STAT(field, channel, field, \
- unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
- EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
- unsigned int, efx_get_uint_stat)
-
-static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
- EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
- EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
- EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
-#ifdef CONFIG_RFS_ACCEL
- EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
-#endif
-};
-
-#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
-
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
/**************************************************************************
@@ -185,18 +106,6 @@ efx_ethtool_set_link_ksettings(struct net_device *net_dev,
return rc;
}
-static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *info)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
- efx_mcdi_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
-}
-
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
return efx_nic_get_regs_len(netdev_priv(net_dev));
@@ -211,341 +120,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev,
efx_nic_get_regs(efx, buf);
}
-static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->msg_enable;
-}
-
-static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- efx->msg_enable = msg_enable;
-}
-
-/**
- * efx_fill_test - fill in an individual self-test entry
- * @test_index: Index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- * @test: Pointer to test result (used only if data != %NULL)
- * @unit_format: Unit name format (e.g. "chan\%d")
- * @unit_id: Unit id (e.g. 0 for "chan0")
- * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
- * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
- *
- * Fill in an individual self-test entry.
- */
-static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
- int *test, const char *unit_format, int unit_id,
- const char *test_format, const char *test_id)
-{
- char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
-
- /* Fill data value, if applicable */
- if (data)
- data[test_index] = *test;
-
- /* Fill string, if applicable */
- if (strings) {
- if (strchr(unit_format, '%'))
- snprintf(unit_str, sizeof(unit_str),
- unit_format, unit_id);
- else
- strcpy(unit_str, unit_format);
- snprintf(test_str, sizeof(test_str), test_format, test_id);
- snprintf(strings + test_index * ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- "%-6s %-24s", unit_str, test_str);
- }
-}
-
-#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
-#define EFX_LOOPBACK_NAME(_mode, _counter) \
- "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
-
-/**
- * efx_fill_loopback_test - fill in a block of loopback self-test entries
- * @efx: Efx NIC
- * @lb_tests: Efx loopback self-test results structure
- * @mode: Loopback test mode
- * @test_index: Starting index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Fill in a block of loopback self-test entries. Return new test
- * index.
- */
-static int efx_fill_loopback_test(struct efx_nic *efx,
- struct efx_loopback_self_tests *lb_tests,
- enum efx_loopback_mode mode,
- unsigned int test_index,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel =
- efx_get_channel(efx, efx->tx_channel_offset);
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_sent[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_sent"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_done[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_done"));
- }
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_good,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_good"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_bad,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_bad"));
-
- return test_index;
-}
-
-/**
- * efx_ethtool_fill_self_tests - get self-test details
- * @efx: Efx NIC
- * @tests: Efx self-test results structure, or %NULL
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Get self-test number of strings, strings, and/or test results.
- * Return number of strings (== number of test results).
- *
- * The reason for merging these three functions is to make sure that
- * they can never be inconsistent.
- */
-static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
- struct efx_self_tests *tests,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel;
- unsigned int n = 0, i;
- enum efx_loopback_mode mode;
-
- efx_fill_test(n++, strings, data, &tests->phy_alive,
- "phy", 0, "alive", NULL);
- efx_fill_test(n++, strings, data, &tests->nvram,
- "core", 0, "nvram", NULL);
- efx_fill_test(n++, strings, data, &tests->interrupt,
- "core", 0, "interrupt", NULL);
-
- /* Event queues */
- efx_for_each_channel(channel, efx) {
- efx_fill_test(n++, strings, data,
- &tests->eventq_dma[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.dma", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_int[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.int", NULL);
- }
-
- efx_fill_test(n++, strings, data, &tests->memory,
- "core", 0, "memory", NULL);
- efx_fill_test(n++, strings, data, &tests->registers,
- "core", 0, "registers", NULL);
-
- if (efx->phy_op->run_tests != NULL) {
- EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
-
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
-
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
- }
-
- /* Loopback tests */
- for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
- if (!(efx->loopback_modes & (1 << mode)))
- continue;
- n = efx_fill_loopback_test(efx,
- &tests->loopback[mode], mode, n,
- strings, data);
- }
-
- return n;
-}
-
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
-{
- size_t n_stats = 0;
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_TXQ_TYPES);
-
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- unsigned short xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
-
- return n_stats;
-}
-
-static int efx_ethtool_get_sset_count(struct net_device *net_dev,
- int string_set)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- switch (string_set) {
- case ETH_SS_STATS:
- return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT +
- efx_describe_per_queue_stats(efx, NULL) +
- efx_ptp_describe_stats(efx, NULL);
- case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
- default:
- return -EINVAL;
- }
-}
-
-static void efx_ethtool_get_strings(struct net_device *net_dev,
- u32 string_set, u8 *strings)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int i;
-
- switch (string_set) {
- case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
- break;
- case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
- break;
- default:
- /* No other string sets */
- break;
- }
-}
-
-static void efx_ethtool_get_stats(struct net_device *net_dev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- const struct efx_sw_stat_desc *stat;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int i;
-
- spin_lock_bh(&efx->stats_lock);
-
- /* Get NIC statistics */
- data += efx->type->update_stats(efx, data, NULL);
-
- /* Get software statistics */
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
- stat = &efx_sw_stat_desc[i];
- switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_nic:
- data[i] = stat->get_stat((void *)efx + stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_channel:
- data[i] = 0;
- efx_for_each_channel(channel, efx)
- data[i] += stat->get_stat((void *)channel +
- stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
- data[i] = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- data[i] +=
- stat->get_stat((void *)tx_queue
- + stat->offset);
- }
- break;
- }
- }
- data += EFX_ETHTOOL_SW_STAT_COUNT;
-
- spin_unlock_bh(&efx->stats_lock);
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- *data = 0;
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- *data += tx_queue->tx_packets;
- }
- data++;
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- *data = 0;
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- *data += rx_queue->rx_packets;
- }
- data++;
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- int xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
- data++;
- }
- }
-
- efx_ptp_update_stats(efx, data);
-}
-
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -787,16 +361,6 @@ out:
return rc;
}
-static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
- struct ethtool_pauseparam *pause)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
- pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
- pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
-}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -1456,7 +1020,7 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = -ENOMEM;
goto out_unlock;
}
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */
efx_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
new file mode 100644
index 000000000000..b8d281ab6c7a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "ethtool_common.h"
+
+struct efx_sw_stat_desc {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned int offset;
+ u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
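The ?: in this macro looks redundant -- both arms evaluate to the same offsetof() -- but the pointer comparison is the point: it makes the compiler warn when field is not of field_type. An illustrative misuse (not from this patch), assuming efx->rx_reset is an atomic_t:

	/* EFX_ETHTOOL_STAT(rx_reset, nic, rx_reset, unsigned int,
	 *		    efx_get_uint_stat)
	 * -> warning: comparison of distinct pointer types
	 *    ('unsigned int *' and 'atomic_t *'),
	 * catching a mismatched reader function at compile time.
	 */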
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->msg_enable;
+}
+
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ efx->msg_enable = msg_enable;
+}
+
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_TXQ_TYPES);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
+ return n_stats;
+}
+
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_describe_per_queue_stats(efx, NULL) +
+ efx_ptp_describe_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (efx_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ efx_ptp_describe_stats(efx, strings);
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ const struct efx_sw_stat_desc *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
+
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ *data = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ *data = 0;
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
+
+ efx_ptp_update_stats(efx, data);
+}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
new file mode 100644
index 000000000000..fa624313f330
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_COMMON_H
+#define EFX_ETHTOOL_COMMON_H
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info);
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause);
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data);
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
+void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
+ u8 *strings);
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats __attribute__ ((unused)),
+ u64 *data);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index eecc348b1c32..42bcd34fc508 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
@@ -2108,7 +2108,7 @@ static void ef4_net_stats(struct net_device *net_dev,
}
/* Context: netif_tx_lock held, BHs disabled. */
-static void ef4_watchdog(struct net_device *net_dev)
+static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct ef4_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index eedd32e2bfcb..dbbb898adddb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 9081f84a2604..54a45010b576 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,11 +346,8 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-int efx_mcdi_port_get_number(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-int efx_mcdi_set_mac(struct efx_nic *efx);
-#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
new file mode 100644
index 000000000000..4310ae5bd898
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -0,0 +1,2270 @@
+#include "mcdi_filters.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
+
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static struct efx_filter_spec *
+efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id)
+{
+ WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
+ return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+}
+
+static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id)
+{
+ return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2);
+}
+
+static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx)
+{
+ return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx;
+}
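A quick consistency check of the ID packing in the three helpers above, assuming EFX_MCDI_FILTER_TBL_ROWS is a power of two (8192 is illustrative):

	/* id  = pri * 8192 * 2 + idx        (make_filter_id)
	 * idx = id & (8192 - 1)             (get_unsafe_id)
	 * pri = id / (8192 * 2)             (get_unsafe_pri)
	 * e.g. pri = 1, idx = 5:
	 *   id = 16389;  16389 & 8191 = 5;  16389 / 16384 = 1
	 */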
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
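The IPv6 branch relies on all IPv6 multicast addresses living in ff00::/8, so inspecting the first byte in network order is sufficient:

	/* loc_host is stored in network byte order, so
	 * ((const u8 *)spec->loc_host)[0] is the top byte:
	 *   ff02::1     -> 0xff -> multicast, shared delivery
	 *   2001:db8::1 -> 0x20 -> unicast, exclusive filter
	 */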
+
+static void
+efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void
+efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ u32 match_fields = 0, uc_match, mc_match;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /*
+ * Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+#define COPY_VALUE(value, mcdi_field) \
+ do { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(value)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &value, sizeof(value)); \
+ } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ COPY_VALUE(spec->gen_field, mcdi_field); \
+ }
+ /*
+ * Handle encap filters first. They will always be mismatch
+ * (unknown UC or MC) filters
+ */
+ if (encap_type) {
+ /*
+ * ether_type and outer_ip_proto need to be variables
+ * because COPY_VALUE wants to memcpy them
+ */
+ __be16 ether_type =
+ htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+ ETH_P_IPV6 : ETH_P_IP);
+ u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+ u8 outer_ip_proto;
+
+ switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+ case EFX_ENCAP_TYPE_VXLAN:
+ vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+ /* fallthrough */
+ case EFX_ENCAP_TYPE_GENEVE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_UDP;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ /*
+ * We always need to set the type field, even
+ * though we're not matching on the TNI.
+ */
+ MCDI_POPULATE_DWORD_1(inbuf,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ vni_type);
+ break;
+ case EFX_ENCAP_TYPE_NVGRE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_GRE;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+}
+
+static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ struct efx_rss_context *ctx,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 flags = spec->flags;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
+
+ /* If RSS filter, caller better have given us an RSS context */
+ if (flags & EFX_FILTER_FLAG_RX_RSS) {
+ /*
+ * We don't have the ability to return an error, so we'll just
+ * log a warning and disable RSS for the filter.
+ */
+ if (WARN_ON_ONCE(!ctx))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ }
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
+}
+
+static int efx_mcdi_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec, u64 *handle,
+ struct efx_rss_context *ctx, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc && spec->priority != EFX_FILTER_PRI_HINT)
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
+ outbuf, outlen, rc);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
+ return rc;
+}
+
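+/*
+ * Convert a driver filter spec's match flags into the MCDI MATCH_FIELDS
+ * bit layout, so they can be compared against the match classes the
+ * firmware reported as supported.
+ */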
+static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ unsigned int match_flags = spec->match_flags;
+ unsigned int uc_match, mc_match;
+ u32 mcdi_flags = 0;
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << ((encap) ? \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+ mcdi_field ## _LBN : \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+ mcdi_field ## _LBN)); \
+ }
+ /* inner or outer based on encap type */
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+ /* always outer */
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* special handling for encap type, and mismatch */
+ if (encap_type) {
+ match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+ mcdi_flags |=
+ (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ }
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
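+/*
+ * Find the match class ("priority") for a spec: the index into
+ * rx_match_mcdi_flags whose flags equal the spec's, or
+ * -EPROTONOSUPPORT if the firmware does not support this match.
+ */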
+static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
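+/*
+ * Insert a filter into the software hash table and push it to the MC.
+ * The table is open-addressed: we probe linearly from the spec's hash
+ * for up to EFX_EF10_FILTER_SEARCH_LIMIT slots, looking for an equal
+ * match to replace or a free slot to use. Multicast recipients may
+ * share a match; mc_rem_map records the lower-priority entries to
+ * unsubscribe once the new filter has been inserted successfully.
+ */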
+static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *saved_spec;
+ struct efx_rss_context *ctx = NULL;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool rss_locked = false;
+ bool replacing = false;
+ unsigned int depth, i;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+ table = efx->filter_state;
+ down_write(&table->lock);
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_filter_pri(table, spec);
+ if (rc < 0)
+ goto out_unlock;
+ match_pri = rc;
+
+ hash = efx_filter_spec_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ mutex_lock(&efx->rss_lock);
+ rss_locked = true;
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at.
+ */
+ for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_spec_equal(spec, saved_spec)) {
+ if (spec->priority < saved_spec->priority &&
+ spec->priority != EFX_FILTER_PRI_AUTO) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+				/* Exclusive filter, so this is the only possible match */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ break;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+ }
+
+ /* Once we reach the maximum search depth, use the first suitable
+ * slot, or return -EBUSY if there was none
+ */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Create a software table entry if necessary. */
+ saved_spec = efx_mcdi_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
+ /* Just make sure it won't be removed */
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_mcdi_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Actually insert the filter on the HW */
+ rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
+ ctx, replacing);
+
+ if (rc == -EINVAL && nic_data->must_realloc_vis)
+ /* The MC rebooted under us, causing it to reject our filter
+ * insertion as pointing to an invalid VI (spec->dmaq_id).
+ */
+ rc = -EAGAIN;
+
+ /* Finalise the software table entry */
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ } else {
+ /* We failed to replace, so the old filter is still present.
+ * Roll back the software table to reflect this. In fact the
+ * efx_mcdi_filter_set_entry() call below will do the right
+ * thing, so nothing extra is needed here.
+ */
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+ priv_flags = efx_mcdi_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index);
+
+out_unlock:
+ if (rss_locked)
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+ return rc;
+}
+
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ s32 ret;
+
+ down_read(&efx->filter_sem);
+ ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal);
+ up_read(&efx->filter_sem);
+
+ return ret;
+}
+
+/*
+ * Remove a filter.
+ * If !by_index, remove by ID
+ * If by_index, remove by index
+ * Filter ID may come from userland and must be range-checked.
+ * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
+ * for write.
+ */
+static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec ||
+ (!by_index &&
+ efx_mcdi_filter_pri(table, spec) !=
+ efx_mcdi_filter_get_unsafe_pri(filter_id)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ return 0;
+ }
+
+ if (!(priority_mask & (1U << spec->priority)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ (efx_rss_active(&efx->rss_context) ?
+ EFX_FILTER_FLAG_RX_RSS : 0));
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = 0;
+ rc = efx_mcdi_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ &efx->rss_context,
+ true);
+
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if ((rc == 0) || (rc == -ENOENT)) {
+ /* Filter removed OK or didn't actually exist */
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
+ MC_CMD_FILTER_OP_EXT_IN_LEN,
+ NULL, 0, rc);
+ }
+ }
+
+ return rc;
+}
+
+/* Remove filters that weren't renewed. */
+static void efx_mcdi_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ int remove_failed = 0;
+ int remove_noent = 0;
+ int rc;
+ int i;
+
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ if (READ_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ rc = efx_mcdi_filter_remove_internal(efx,
+ 1U << EFX_FILTER_PRI_AUTO, i, true);
+ if (rc == -ENOENT)
+ remove_noent++;
+ else if (rc)
+ remove_failed++;
+ }
+ }
+ up_write(&table->lock);
+
+ if (remove_failed)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d filters\n",
+ __func__, remove_failed);
+ if (remove_noent)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d non-existent filters\n",
+ __func__, remove_noent);
+}
+
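+/*
+ * Remove a filter by the ID previously returned to the caller. The ID
+ * encodes both the match class and the table index, and is validated
+ * against the stored spec before anything is removed.
+ */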
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ false);
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+/* Caller must hold efx->filter_sem for read */
+static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+
+ down_write(&table->lock);
+ efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ true);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ const struct efx_filter_spec *saved_spec;
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_mcdi_filter_pri(table, saved_spec) ==
+ efx_mcdi_filter_get_unsafe_pri(filter_id)) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
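+/*
+ * Insert or renew one MAC address filter per entry in the device's
+ * unicast or multicast list for the given VLAN. With rollback, any
+ * failure removes the filters inserted so far and returns the error,
+ * and the multicast list also gets an Ethernet broadcast filter;
+ * without rollback, failed entries keep an invalid ID and insertion
+ * carries on.
+ */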
+static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ bool multicast, bool rollback)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_dev_addr *addr_list;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ u16 *ids;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ ids = vlan->mc;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ ids = vlan->uc;
+ }
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_mcdi_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* keep invalid ID, and carry on */
+ }
+ } else {
+ ids[i] = efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
+ EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ return 0;
+}
+
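+/*
+ * Insert or renew a "default" filter catching unknown unicast or
+ * multicast destinations, optionally for one encapsulation type.
+ * On success the filter ID is recorded in the VLAN's default_filters
+ * slot selected by the encap type.
+ */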
+static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ enum efx_encap_type encap_type,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+ u16 *id;
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ if (encap_type) {
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx_filter_set_encap_type(&spec, encap_type);
+ else
+ /*
+ * don't insert encap filters on non-supporting
+ * platforms. ID will be left as INVALID.
+ */
+ return 0;
+ }
+
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ const char *um = multicast ? "Multicast" : "Unicast";
+ const char *encap_name = "";
+ const char *encap_ipv = "";
+
+ if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_VXLAN)
+ encap_name = "VXLAN ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_NVGRE)
+ encap_name = "NVGRE ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_GENEVE)
+ encap_name = "GENEVE ";
+ if (encap_type & EFX_ENCAP_FLAG_IPV6)
+ encap_ipv = "IPv6 ";
+ else if (encap_type)
+ encap_ipv = "IPv4 ";
+
+ /*
+ * unprivileged functions can't insert mismatch filters
+ * for encapsulated or unicast traffic, so downgrade
+ * those warnings to debug.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ rc == -EPERM && (encap_type || !multicast), warn,
+ "%s%s%s mismatch filter insert failed rc=%d\n",
+ encap_name, encap_ipv, um, rc);
+ } else if (multicast) {
+ /* mapping from encap types to default filter IDs (multicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_MCDEF,
+ };
+
+		/*
+		 * quick bounds check: the map never yields EFX_EF10_BCAST (0),
+		 * so a zero entry marks an unsupported encap type
+		 */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = efx_mcdi_filter_get_unsafe_id(rc);
+ if (!nic_data->workaround_26807 && !encap_type) {
+ /* Also need an Ethernet broadcast filter */
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ *id);
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ EFX_WARN_ON_PARANOID(
+ vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+ rc = 0;
+ } else {
+ /* mapping from encap types to default filter IDs (unicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_UCDEF,
+ };
+
+		/*
+		 * quick bounds check: the map never yields EFX_EF10_BCAST (0),
+		 * so a zero entry marks an unsupported encap type
+		 */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = rc;
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	/*
+	 * Do not install filters for the unspecified VID if VLAN filtering
+	 * is enabled, and do not install filters for specified VIDs if
+	 * VLAN filtering is disabled.
+	 */
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
+
+ /* Insert/renew unicast filters */
+ if (table->uc_promisc) {
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+ false, false);
+ efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
+ } else {
+ /*
+ * If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ false, false);
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+
+ /*
+ * Insert/renew multicast filters
+ *
+ * If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
+ efx_mcdi_filter_remove_old(efx);
+ if (table->mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /*
+ * If we failed to insert promiscuous filters, rollback
+ * and fall back to individual multicast filters
+ */
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_mcdi_filter_remove_old(efx);
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If we failed to insert promiscuous filters, don't
+ * rollback. Regardless, also insert the mc_list,
+ * unless it's incomplete due to overflow
+ */
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, false);
+ if (!table->mc_overflow)
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If any filters failed to insert, rollback and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_mcdi_filter_remove_old(efx);
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true))
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+}
+
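+/*
+ * Remove all RX filters at or below the given priority, leaving
+ * automatic filters in place (they are excluded from priority_mask).
+ */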
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ rc = efx_mcdi_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ break;
+ rc = 0;
+ }
+
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
+}
+
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] =
+ efx_mcdi_filter_make_filter_id(
+ efx_mcdi_filter_pri(table, spec),
+ filter_idx);
+ }
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
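+/*
+ * Inverse of efx_mcdi_filter_mcdi_flags_from_spec(): decode a set of
+ * MCDI match-field bits back into driver match flags. Returns -EINVAL
+ * if any bit is left over, i.e. the firmware match class has no
+ * driver equivalent.
+ */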
+static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) do { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ } while (0)
+
+ if (encap) {
+ /* encap filters must specify encap type */
+ match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ /* and imply ethertype and ip proto */
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ /* VLAN tags refer to the outer packet */
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ /* everything else refers to the inner packet */
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+ MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+ MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+ MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+ MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+ MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+ MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+ MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+ } else {
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+ }
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
+ struct efx_mcdi_filter_table *table,
+ bool encap)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ size_t outlen;
+ int rc;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ return rc;
+
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
+ }
+ }
+
+ return 0;
+}
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_table *table;
+ struct efx_mcdi_filter_vlan *vlan;
+ int rc;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->rx_match_count = 0;
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
+ if (rc)
+ goto fail;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
+ if (rc)
+ goto fail;
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
+ sizeof(*table->entry)));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
+ init_rwsem(&table->lock);
+
+ efx->filter_state = table;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+fail:
+ kfree(table);
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+void efx_mcdi_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int invalid_filters = 0, failed = 0;
+ struct efx_mcdi_filter_vlan *vlan;
+ struct efx_filter_spec *spec;
+ struct efx_rss_context *ctx;
+ unsigned int filter_idx;
+ u32 mcdi_flags;
+ int match_pri;
+ int rc, i;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ if (!table)
+ return;
+
+ down_write(&table->lock);
+ mutex_lock(&efx->rss_lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ match_pri = 0;
+ while (match_pri < table->rx_match_count &&
+ table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+ ++match_pri;
+ if (match_pri >= table->rx_match_count) {
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ if (!ctx) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ }
+
+ rc = efx_mcdi_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ ctx, false);
+ if (rc)
+ failed++;
+
+ if (rc) {
+not_restored:
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+ if (vlan->default_filters[i] == filter_idx)
+ vlan->default_filters[i] =
+ EFX_EF10_FILTER_ID_INVALID;
+
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+
+ /*
+ * This can happen validly if the MC's capabilities have changed, so
+ * is not an error.
+ */
+ if (invalid_filters)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Did not restore %u filters that are now unsupported.\n",
+ invalid_filters);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore %u filters\n", failed);
+ else
+ nic_data->must_restore_filters = false;
+}
+
+void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+ /*
+ * If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ if (rc)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: filter %04x remove failed\n",
+ __func__, filter_idx);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
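+/*
+ * Flag one automatic filter as old and forget its ID; a filter still
+ * flagged when efx_mcdi_filter_remove_old() runs (i.e. one that was
+ * not renewed in the meantime) will be removed.
+ */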
+static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+
+ efx_rwsem_assert_write_locked(&table->lock);
+
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int i;
+
+ for (i = 0; i < table->dev_uc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
+ for (i = 0; i < table->dev_mc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
+}
+
+/*
+ * Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ down_write(&table->lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_mcdi_filter_vlan_mark_old(efx, vlan);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->default_filters[i]);
+
+ kfree(vlan);
+}
+
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_vlan *vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
+ u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ unsigned int i;
+
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->uc_promisc = true;
+ break;
+ }
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ i++;
+ }
+
+ table->dev_uc_count = i;
+}
+
+static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i;
+
+ table->mc_overflow = false;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
+
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->mc_promisc = true;
+ table->mc_overflow = true;
+ break;
+ }
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ i++;
+ }
+
+ table->dev_mc_count = i;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_mcdi_filter_mark_old(efx);
+
+ /*
+ * Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_mcdi_filter_uc_addr_list(efx);
+ efx_mcdi_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /*
+ * If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_mcdi_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ efx_mcdi_filter_remove_old(efx);
+ table->mc_promisc_last = table->mc_promisc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_filter_spec *spec, saved_spec;
+ struct efx_mcdi_filter_table *table;
+ struct efx_arfs_rule *rule = NULL;
+ bool ret = true, force = false;
+ u16 arfs_id;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
+ goto out_unlock;
+
+ spin_lock_bh(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always return 0 to ARFS. */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, spec);
+ if (!rule)
+ /* ARFS table doesn't know of this filter, so remove it */
+ goto expire;
+ arfs_id = rule->arfs_id;
+ ret = efx_rps_check_rule(rule, filter_idx, &force);
+ if (force)
+ goto expire;
+ if (!ret) {
+ spin_unlock_bh(&efx->rps_hash_lock);
+ goto out_unlock;
+ }
+ }
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
+ ret = false;
+ else if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+ saved_spec = *spec; /* remove operation will kfree spec */
+ spin_unlock_bh(&efx->rps_hash_lock);
+ /*
+ * At this point (since we dropped the lock), another thread might queue
+ * up a fresh insertion request (but the actual insertion will be held
+ * up by our possession of the filter table lock). In that case, it
+ * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+ * the rule is not removed by efx_rps_hash_del() below.
+ */
+ if (ret)
+ ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
+ /*
+ * While we can't safely dereference rule (we dropped the lock), we can
+ * still test it for NULL.
+ */
+ if (ret && rule) {
+ /* Expiring, so remove entry from ARFS table */
+ spin_lock_bh(&efx->rps_hash_lock);
+ efx_rps_hash_del(efx, &saved_spec);
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
+out_unlock:
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return ret;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
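+/*
+ * Default RSS hash configuration: Toeplitz hashing enabled for IPv4
+ * and IPv6, with addresses and ports (4-tuple) hashed for TCP and
+ * addresses only (2-tuple) for UDP and other IP traffic.
+ */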
+#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
+ 1 << RSS_MODE_HASH_DST_ADDR_LBN)
+#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
+ 1 << RSS_MODE_HASH_DST_PORT_LBN)
+#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
+
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+{
+ /*
+ * Firmware had a bug (sfc bug 61952) where it would not actually
+ * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
+ * This meant that it would always contain whatever was previously
+ * in the MCDI buffer. Fortunately, all firmware versions with
+ * this bug have the same default flags value for a newly-allocated
+ * RSS context, and the only time we want to get the flags is just
+ * after allocating. Moreover, the response has a 32-bit hole
+ * where the context ID would be in the request, so we can use an
+ * overlength buffer in the request and pre-fill the flags field
+ * with what we believe the default to be. Thus if the firmware
+ * has the bug, it will leave our pre-filled value in the flags
+ * field of the response, and we will get the right answer.
+ *
+ * However, this does mean that this function should NOT be used if
+ * the RSS context flags might not be their defaults - it is ONLY
+ * reliably correct for a newly-allocated RSS context.
+ */
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ /* Check we have a hole for the context ID */
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
+ RSS_CONTEXT_FLAGS_DEFAULT);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc == 0) {
+ if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
+ rc = -EIO;
+ else
+ *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
+ }
+ return rc;
+}
+
+/*
+ * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
+ * If we fail, we just leave the RSS context at its default hash settings,
+ * which is safe but may slightly reduce performance.
+ * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
+ * just need to set the UDP ports flags (for both IP versions).
+ */
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ u32 flags;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
+
+ if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
+ return;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
+ if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL))
+ /* Succeeded, so UDP 4-tuple is now enabled */
+ ctx->rx_hash_udp_4tuple = true;
+}
+
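+/*
+ * Allocate an RSS context, either exclusive to this function or shared.
+ * A shared context is limited to a power-of-two spread of at most
+ * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE queues; if the spread would be 1,
+ * no context is needed at all and the ID is left invalid.
+ */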
+static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
+ struct efx_rss_context *ctx,
+ unsigned *context_size)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ return -EOPNOTSUPP;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ if (context_size)
+ *context_size = rss_spread;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ efx_mcdi_set_rss_context_flags(efx, ctx);
+
+ return 0;
+}
+
+static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
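+/*
+ * Program an RSS context's indirection table and Toeplitz hash key,
+ * as two separate MCDI requests.
+ */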
+static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table, const u8 *key)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ /* This iterates over the length of efx->rss_context.rx_indir_table, but
+ * copies bytes from rx_indir_table. That's because the latter is a
+ * pointer rather than an array, but should have the same length.
+ * The efx->rss_context.rx_hash_key loop below is similar.
+ */
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ WARN_ON(rc != 0);
+ }
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+}
+
+static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
+ context_size);
+
+ if (rc != 0)
+ return rc;
+
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ return 0;
+}
+
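+/*
+ * Ensure we have an exclusive RSS context (allocating one if the current
+ * context is shared or invalid), program it, and free any old context
+ * we replaced. On failure the previous context ID is restored.
+ */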
+static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ u32 old_rx_rss_context = efx->rss_context.context_id;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
+ NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ }
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
+ rx_indir_table, key);
+ if (rc != 0)
+ goto fail2;
+
+ if (efx->rss_context.context_id != old_rx_rss_context &&
+ old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rss_context.rx_indir_table)
+ memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (key != efx->rss_context.rx_hash_key)
+ memcpy(efx->rss_context.rx_hash_key, key,
+ efx->type->rx_hash_key_size);
+
+ return 0;
+
+fail2:
+ if (old_rx_rss_context != efx->rss_context.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
+ efx->rss_context.context_id = old_rx_rss_context;
+ }
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
+ if (rc)
+ return rc;
+ }
+
+ if (!rx_indir_table) /* Delete this context */
+ return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
+ if (rc)
+ return rc;
+
+ memcpy(ctx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
+ return 0;
+}
+
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+ size_t outlen;
+ int rc, i;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+ MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ return -ENOENT;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+ tablebuf, sizeof(tablebuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+ RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+ keybuf, sizeof(keybuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
+ ctx->rx_hash_key[i] = MCDI_PTR(
+ keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+ return 0;
+}
+
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
+{
+ int rc;
+
+ mutex_lock(&efx->rss_lock);
+ rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+}
+
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_rss_context *ctx;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (!nic_data->must_restore_rss_contexts)
+ return;
+
+ list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ /* previous NIC RSS context is gone */
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ /* so try to allocate a new one */
+ rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
+ ctx->rx_indir_table,
+ ctx->rx_hash_key);
+ if (rc)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore RSS context %u, rc=%d"
+ "; RSS filters may fail to be applied\n",
+ ctx->user_id, rc);
+ }
+ nic_data->must_restore_rss_contexts = false;
+}
+
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+
+ rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned int context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0;
+ i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
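/*
 * Illustrative sketch, not part of this patch (example_* is hypothetical):
 * the "mismatch" scan above checks whether the requested table matches the
 * ethtool default spreading, which for entry i across n queues is simply
 * i % n (ethtool_rxfh_indir_default() from <linux/ethtool.h>).
 */
static bool example_is_default_indir(const u32 *table, size_t len,
				     u32 n_queues)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (table[i] != ethtool_rxfh_indir_default(i, n_queues))
			return false;
	return true;
}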
+
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)))
+{
+ if (user)
+ return -EOPNOTSUPP;
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
new file mode 100644
index 000000000000..1837f4f5d661
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FILTERS_H
+#define EFX_MCDI_FILTERS_H
+
+#include "net_driver.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+enum efx_mcdi_filter_default_filters {
+ EFX_EF10_BCAST,
+ EFX_EF10_UCDEF,
+ EFX_EF10_MCDEF,
+ EFX_EF10_VXLAN4_UCDEF,
+ EFX_EF10_VXLAN4_MCDEF,
+ EFX_EF10_VXLAN6_UCDEF,
+ EFX_EF10_VXLAN6_MCDEF,
+ EFX_EF10_NVGRE4_UCDEF,
+ EFX_EF10_NVGRE4_MCDEF,
+ EFX_EF10_NVGRE6_UCDEF,
+ EFX_EF10_NVGRE6_MCDEF,
+ EFX_EF10_GENEVE4_UCDEF,
+ EFX_EF10_GENEVE4_MCDEF,
+ EFX_EF10_GENEVE6_UCDEF,
+ EFX_EF10_GENEVE6_MCDEF,
+
+ EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
+/* Per-VLAN filter information */
+struct efx_mcdi_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
+};
+
+struct efx_mcdi_dev_addr {
+ u8 addr[ETH_ALEN];
+};
+
+struct efx_mcdi_filter_table {
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
+ unsigned int rx_match_count;
+
+ struct rw_semaphore lock; /* Protects entries */
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
+/* unused flag 1UL */
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+/* Shadow of net_device address lists, guarded by mac_lock */
+ struct efx_mcdi_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_mcdi_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+ bool mc_promisc_last;
+ bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ bool vlan_filter;
+ struct list_head vlan_list;
+};
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+void efx_mcdi_filter_table_remove(struct efx_nic *efx);
+void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+
+/*
+ * The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define EFX_MCDI_FILTER_TBL_ROWS 8192
+
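/*
 * Hypothetical sketch, not part of this patch: with 8192 rows, a 32-bit
 * software filter ID narrow enough for the RX NFC interface can encode the
 * table row plus a priority band. The name and encoding below are
 * illustrative only.
 */
static inline u32 example_make_filter_id(u32 pri, u32 idx)
{
	/* hypothetical encoding: priority in the bits above the row index */
	return pri * EFX_MCDI_FILTER_TBL_ROWS + idx;
}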
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags);
+
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx);
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal);
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec);
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx);
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid);
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx, u16 vid);
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)));
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags);
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
+
+static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here */
+}
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx);
+
+#endif /* EFX_MCDI_FILTERS_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
new file mode 100644
index 000000000000..dcfe78b0fa5a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_functions.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+int efx_mcdi_free_vis(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
+ return rc;
+}
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ if (vi_base)
+ *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ if (allocated_vis)
+ *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
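/*
 * Illustrative caller sketch, not part of this patch (example_* is
 * hypothetical): firmware may grant any count between min_vis and max_vis,
 * so callers size their channel plan from *allocated_vis rather than from
 * the request.
 */
static int example_alloc_vis(struct efx_nic *efx, unsigned int want,
			     unsigned int *granted)
{
	unsigned int vi_base;

	return efx_mcdi_alloc_vis(efx, 1, want, &vi_base, granted);
}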
+
+int efx_mcdi_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc, i;
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ if (v2) {
+ /* Use the new generic approach to specifying event queue
+ * configuration, requesting lower latency or higher throughput.
+ * The options that actually get used appear in the output.
+ */
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_V2_IN_FLAG_TYPE,
+ MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+ } else {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
+ }
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %d using event queue flags %08x\n",
+ channel->channel,
+ MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
+ return rc;
+}
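/*
 * Illustrative sketch, not part of this patch: each event is an efx_qword_t
 * (8 bytes) and the queue is carved into EFX_BUF_SIZE chunks, one DMA
 * address per chunk in the INIT_EVQ request. Assuming EFX_BUF_SIZE of 4096,
 * a 1024-entry queue needs 1024 * 8 / 4096 = 2 buffer-table entries.
 */
static size_t example_evq_buf_count(size_t entries)
{
	return entries * sizeof(efx_qword_t) / EFX_BUF_SIZE;
}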
+
+void efx_mcdi_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+void efx_mcdi_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ef10_nic_data *nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ do {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
+ /* This flag was removed from mcdi_pcol.h for
+ * the non-_EXT version of INIT_TXQ. However,
+ * firmware still honours it.
+ */
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc == -ENOSPC && tso_v2) {
+ /* Retry without TSOv2 if we're short on contexts. */
+ tso_v2 = false;
+ netif_warn(efx, probe, efx->net_dev,
+ "TSOv2 context not available to segment in "
+ "hardware. TCP performance may be reduced.\n");
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
+ MC_CMD_INIT_TXQ_EXT_IN_LEN,
+ NULL, 0, rc);
+ goto fail;
+ }
+ } while (rc);
+
+ return 0;
+
+fail:
+ return rc;
+}
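/*
 * Note (illustrative, not part of this patch): the do/while above runs at
 * most twice. A first attempt with TSOv2 that fails with -ENOSPC (no free
 * TSOv2 contexts) clears tso_v2 and retries; any other error jumps to fail,
 * and success leaves rc == 0, which terminates the loop.
 */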
+
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc)
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
+}
+
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
+{
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ return 0;
+}
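/*
 * Illustrative caller sketch, not part of this patch (example_* is
 * hypothetical): a probe path feeds the VI window mode reported by
 * GET_CAPABILITIES_V3 into this helper and aborts on an unknown mode.
 */
static int example_probe_vi_stride(struct efx_nic *efx, u8 window_mode)
{
	int rc = efx_mcdi_window_mode_to_stride(efx, window_mode);

	if (rc)
		return rc;	/* -EIO on an unrecognised mode */
	/* efx->vi_stride is now 8192, 16384 or 65536 */
	return 0;
}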
+
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
new file mode 100644
index 000000000000..ca4a5ac1a66b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FUNCTIONS_H
+#define EFX_MCDI_FUNCTIONS_H
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis);
+int efx_mcdi_free_vis(struct efx_nic *efx);
+
+int efx_mcdi_ev_probe(struct efx_channel *channel);
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
+void efx_mcdi_ev_remove(struct efx_channel *channel);
+void efx_mcdi_ev_fini(struct efx_channel *channel);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
+
+#endif /* EFX_MCDI_FUNCTIONS_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb7cde4980ed..ab5227b13ae6 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -14,106 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
-
-struct efx_mcdi_phy_data {
- u32 flags;
- u32 type;
- u32 supported_cap;
- u32 channel;
- u32 port;
- u32 stats_mask;
- u8 name[20];
- u32 media;
- u32 mmd_mask;
- u8 revision[20];
- u32 forced_cap;
-};
-
-static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
- cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
- cfg->supported_cap =
- MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
- cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
- cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
- cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
- memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
- sizeof(cfg->name));
- cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
- cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
- memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
- sizeof(cfg->revision));
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
- u32 flags, u32 loopback_mode,
- u32 loopback_speed)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
- int rc;
-
- BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
-
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- return rc;
-}
-
-static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
- MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
- rc = -EIO;
- goto fail;
- }
-
- *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
+#include "mcdi_port_common.h"
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
@@ -168,246 +69,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
-{
- #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
- switch (media) {
- case MC_CMD_MEDIA_KX4:
- SET_BIT(Backplane);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseKX_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseKX4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseKR4_Full);
- break;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
- SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
- SET_BIT(25000baseCR_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
- SET_BIT(50000baseCR2_Full);
- break;
-
- case MC_CMD_MEDIA_BASE_T:
- SET_BIT(TP);
- if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- SET_BIT(10baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- SET_BIT(10baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- SET_BIT(100baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- SET_BIT(100baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- SET_BIT(1000baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- break;
- }
-
- if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- SET_BIT(Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- SET_BIT(Asym_Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- SET_BIT(Autoneg);
-
- #undef SET_BIT
-}
-
-static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
-{
- u32 result = 0;
-
- #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- if (TEST_BIT(10baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (TEST_BIT(10baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (TEST_BIT(100baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (TEST_BIT(100baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (TEST_BIT(1000baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
- result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
- result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
- result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
- if (TEST_BIT(50000baseCR2_Full))
- result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
- if (TEST_BIT(Pause))
- result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (TEST_BIT(Asym_Pause))
- result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (TEST_BIT(Autoneg))
- result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
-
- #undef TEST_BIT
-
- return result;
-}
-
-static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- enum efx_phy_mode mode, supported;
- u32 flags;
-
- /* TODO: Advertise the capabilities supported by this PHY */
- supported = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
- supported |= PHY_MODE_TX_DISABLED;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
- supported |= PHY_MODE_LOW_POWER;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
- supported |= PHY_MODE_OFF;
-
- mode = efx->phy_mode & supported;
-
- flags = 0;
- if (mode & PHY_MODE_TX_DISABLED)
- flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
- if (mode & PHY_MODE_LOW_POWER)
- flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
- if (mode & PHY_MODE_OFF)
- flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
-
- return flags;
-}
-
-static u8 mcdi_to_ethtool_media(u32 media)
-{
- switch (media) {
- case MC_CMD_MEDIA_XAUI:
- case MC_CMD_MEDIA_CX4:
- case MC_CMD_MEDIA_KX4:
- return PORT_OTHER;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- return PORT_FIBRE;
-
- case MC_CMD_MEDIA_BASE_T:
- return PORT_TP;
-
- default:
- return PORT_OTHER;
- }
-}
-
-static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- /* Fall through */
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
-/* The semantics of the ethtool FEC mode bitmask are not well defined,
- * particularly the meaning of combinations of bits. Which means we get to
- * define our own semantics, as follows:
- * OFF overrides any other bits, and means "disable all FEC" (with the
- * exception of 25G KR4/CR4, where it is not possible to reject it if AN
- * partner requests it).
- * AUTO on its own means use cable requirements and link partner autoneg with
- * fw-default preferences for the cable type.
- * AUTO and either RS or BASER means use the specified FEC type if cable and
- * link partner support it, otherwise autoneg/fw-default.
- * RS or BASER alone means use the specified FEC type if cable and link partner
- * support it and either requests it, otherwise no FEC.
- * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
- * partner support it, preferring RS to BASER.
- */
-static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
-{
- u32 ret = 0;
-
- if (ethtool_cap & ETHTOOL_FEC_OFF)
- return 0;
-
- if (ethtool_cap & ETHTOOL_FEC_AUTO)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
- if (ethtool_cap & ETHTOOL_FEC_RS)
- ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
- if (ethtool_cap & ETHTOOL_FEC_BASER)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
- return ret;
-}
-
-/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function
- * can never produce, (baser xor rs) and neither req; the implementation below
- * maps both of those to AUTO. This should never matter, and it's not clear
- * what a better mapping would be anyway.
- */
-static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
-{
- bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
- rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
- baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
- baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
-
- if (!baser && !rs)
- return ETHTOOL_FEC_OFF;
- return (rs_req ? ETHTOOL_FEC_RS : 0) |
- (baser_req ? ETHTOOL_FEC_BASER : 0) |
- (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
-}
-
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
@@ -527,58 +188,6 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
- * supported by the link partner. Warn the user if this isn't the case
- */
-static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 rmtadv;
-
- /* The link partner capabilities are only relevant if the
- * link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- return;
-
- /* If flow control autoneg is supported and enabled, then fine */
- if (efx->wanted_fc & EFX_FC_AUTO)
- return;
-
- rmtadv = 0;
- if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- rmtadv |= ADVERTISED_Pause;
- if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- rmtadv |= ADVERTISED_Asym_Pause;
-
- if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
- netif_err(efx, link, efx->net_dev,
- "warning: link partner doesn't support pause frames");
-}
-
-static bool efx_mcdi_phy_poll(struct efx_nic *efx)
-{
- struct efx_link_state old_state = efx->link_state;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- efx->link_state.up = false;
- else
- efx_mcdi_phy_decode_link(
- efx, &efx->link_state,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-
- return !efx_link_state_equal(&efx->link_state, &old_state);
-}
-
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -666,58 +275,6 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
- u32 caps, active, speed; /* MCDI format */
- bool is_25g = false;
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
- return -EOPNOTSUPP;
-
- /* behaviour for 25G/50G links depends on 25G BASER bit */
- speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
- is_25g = speed == 25000 || speed == 50000;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
- fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
- /* BASER is never supported on 100G */
- if (speed == 100000)
- fec->fec &= ~ETHTOOL_FEC_BASER;
-
- active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
- switch (active) {
- case MC_CMD_FEC_NONE:
- fec->active_fec = ETHTOOL_FEC_OFF;
- break;
- case MC_CMD_FEC_BASER:
- fec->active_fec = ETHTOOL_FEC_BASER;
- break;
- case MC_CMD_FEC_RS:
- fec->active_fec = ETHTOOL_FEC_RS;
- break;
- default:
- netif_warn(efx, hw, efx->net_dev,
- "Firmware reports unrecognised FEC_TYPE %u\n",
- active);
- /* We don't know what firmware has picked. AUTO is as good a
- * "can't happen" value as any other.
- */
- fec->active_fec = ETHTOOL_FEC_AUTO;
- break;
- }
-
- return 0;
-}
-
static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
const struct ethtool_fecparam *fec)
{
@@ -745,27 +302,6 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EIO;
- if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
- return -EINVAL;
-
- return 0;
-}
-
static const char *const mcdi_sft9001_cable_diag_names[] = {
"cable.pairA.length",
"cable.pairB.length",
@@ -1139,84 +675,6 @@ u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
return phy_data->supported_cap;
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
- [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
- [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
- [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
- [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
-};
-
-void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 fcntl;
- MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
-
- BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
-
- /* This has no effect on EF10 */
- ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
- efx->net_dev->dev_addr);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* Set simple MAC filter for Siena */
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
- SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
-
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
- SET_MAC_IN_FLAG_INCLUDE_FCS,
- !!(efx->net_dev->features & NETIF_F_RXFCS));
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
@@ -1348,17 +806,3 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
-
-/* Get physical port number (EF10 only; on Siena it is same as PF number) */
-int efx_mcdi_port_get_number(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- return rc;
-
- return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
new file mode 100644
index 000000000000..a6a072ba46d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mcdi_port_common.h"
+#include "efx_common.h"
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
+{
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+{
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ SET_BIT(Backplane);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseKX_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseKX4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseKR4_Full);
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ SET_BIT(FIBRE);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ SET_BIT(TP);
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ SET_BIT(10baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ SET_BIT(10baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ SET_BIT(100baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ SET_BIT(100baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ SET_BIT(1000baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ SET_BIT(Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ SET_BIT(Asym_Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ SET_BIT(Autoneg);
+
+ #undef SET_BIT
+}
+
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+{
+ u32 result = 0;
+
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (TEST_BIT(10baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (TEST_BIT(100baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (TEST_BIT(100baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (TEST_BIT(1000baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (TEST_BIT(Asym_Pause))
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (TEST_BIT(Autoneg))
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ #undef TEST_BIT
+
+ return result;
+}
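/*
 * Illustrative sketch, not part of this patch (example_* is hypothetical):
 * the two helpers above are near-inverses, so a capability word can be
 * pushed through the ethtool representation and back. Bits with no ethtool
 * equivalent for the given media are dropped, so the result is a subset of
 * the input.
 */
static u32 example_linkset_roundtrip(u32 media, u32 cap)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(linkset);

	mcdi_to_ethtool_linkset(media, cap, linkset);
	return ethtool_linkset_to_mcdi_cap(linkset);
}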
+
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+ return flags;
+}
+
+u8 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ /* Fall through */
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+/* The semantics of the ethtool FEC mode bitmask are not well defined,
+ * particularly the meaning of combinations of bits. Which means we get to
+ * define our own semantics, as follows:
+ * OFF overrides any other bits, and means "disable all FEC" (with the
+ * exception of 25G KR4/CR4, where it is not possible to reject it if AN
+ * partner requests it).
+ * AUTO on its own means use cable requirements and link partner autoneg with
+ * fw-default preferences for the cable type.
+ * AUTO and either RS or BASER means use the specified FEC type if cable and
+ * link partner support it, otherwise autoneg/fw-default.
+ * RS or BASER alone means use the specified FEC type if cable and link partner
+ * support it and either requests it, otherwise no FEC.
+ * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
+ * partner support it, preferring RS to BASER.
+ */
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+{
+ u32 ret = 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_AUTO)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_RS)
+ ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ return ret;
+}
+
+/* Invert ethtool_fec_caps_to_mcdi. There are two combinations which that
+ * function can never produce, (baser xor rs) with neither req bit set; the
+ * implementation below maps both of those to AUTO. This should never matter,
+ * and it's not clear what a better mapping would be anyway.
+ */
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+ bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
+ rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
+ baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
+ baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+
+ if (!baser && !rs)
+ return ETHTOOL_FEC_OFF;
+ return (rs_req ? ETHTOOL_FEC_RS : 0) |
+ (baser_req ? ETHTOOL_FEC_BASER : 0) |
+ (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
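/*
 * Illustrative worked example, not part of this patch (example_* is
 * hypothetical): ETHTOOL_FEC_RS maps to RS_FEC | RS_FEC_REQUESTED, and
 * feeding that back through the inverse (on a sub-25G link) yields
 * ETHTOOL_FEC_RS with no AUTO bit, since baser == baser_req and
 * rs == rs_req.
 */
static u32 example_fec_roundtrip(void)
{
	u32 caps = ethtool_fec_caps_to_mcdi(ETHTOOL_FEC_RS);

	return mcdi_fec_caps_to_ethtool(caps, false);	/* ETHTOOL_FEC_RS */
}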
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation
+ */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
+
+bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ efx->link_state.up = false;
+ else
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+ u32 caps, active, speed; /* MCDI format */
+ bool is_25g = false;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
+ return -EOPNOTSUPP;
+
+ /* behaviour for 25G/50G links depends on 25G BASER bit */
+ speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
+ is_25g = speed == 25000 || speed == 50000;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
+ fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
+ /* BASER is never supported on 100G */
+ if (speed == 100000)
+ fec->fec &= ~ETHTOOL_FEC_BASER;
+
+ active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
+ switch (active) {
+ case MC_CMD_FEC_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case MC_CMD_FEC_BASER:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case MC_CMD_FEC_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ netif_warn(efx, hw, efx->net_dev,
+ "Firmware reports unrecognised FEC_TYPE %u\n",
+ active);
+ /* We don't know what firmware has picked. AUTO is as good a
+ * "can't happen" value as any other.
+ */
+ fec->active_fec = ETHTOOL_FEC_AUTO;
+ break;
+ }
+
+ return 0;
+}
+
+int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ /* This has no effect on EF10 */
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS,
+ !!(efx->net_dev->features & NETIF_F_RXFCS));
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+/* Get physical port number (EF10 only; on Siena it is same as PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
new file mode 100644
index 000000000000..b16f11265269
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed);
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
+u8 mcdi_to_ethtool_media(u32 media);
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl);
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+bool efx_mcdi_phy_poll(struct efx_nic *efx);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec);
+int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_set_mac(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index dfd5182d9e47..9f9886f222c8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -24,7 +24,6 @@
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
-#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
#include <net/xdp.h>
@@ -139,6 +138,8 @@ struct efx_special_buffer {
* freed when descriptor completes
* @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
* member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ * descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -153,7 +154,7 @@ struct efx_tx_buffer {
struct xdp_frame *xdpf;
};
union {
- efx_qword_t option;
+ efx_qword_t option; /* EF10 */
dma_addr_t dma_addr;
};
unsigned short flags;
@@ -743,13 +744,13 @@ union efx_multicast_hash {
struct vfdi_status;
/* The reserved RSS context value */
-#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
* struct efx_rss_context - A user-defined RSS context for filtering
* @list: node of linked list on which this struct is stored
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID.
+ * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
@@ -1611,6 +1612,15 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
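+/* Step to the next receive buffer in the ring, wrapping from the entry at
+ * ptr_mask back to entry 0.
+ */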
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1f7c5717de75..6670fda8f35a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -9,9 +9,9 @@
#define EFX_NIC_H
#include <linux/net_tstamp.h>
-#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
#include "mcdi.h"
enum {
@@ -506,6 +506,9 @@ static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
tx_queue->efx->type->tx_write(tx_queue);
}
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
@@ -554,6 +557,7 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
+
void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
@@ -671,6 +675,7 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
+
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c29bf862a94c..a2042f16babc 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -21,6 +21,7 @@
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
+#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
@@ -32,60 +33,13 @@
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
-/* This is the percentage fill level below which new RX descriptors
- * will be added to the RX descriptor ring.
- */
-static unsigned int rx_refill_threshold;
-
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
-/*
- * RX maximum head room required.
- *
- * This must be at least 1 to prevent overflow, plus one packet-worth
- * to allow pipelined receives.
- */
-#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-
-static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
-{
- return page_address(buf->page) + buf->page_offset;
-}
-
-static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
-{
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
-#else
- const u8 *data = eh + efx->rx_packet_hash_offset;
- return (u32)data[0] |
- (u32)data[1] << 8 |
- (u32)data[2] << 16 |
- (u32)data[3] << 24;
-#endif
-}
-
-static inline struct efx_rx_buffer *
-efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
-{
- if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
- return efx_rx_buffer(rx_queue, 0);
- else
- return rx_buf + 1;
-}
-
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
@@ -94,301 +48,6 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE);
}
-void efx_rx_config_page_split(struct efx_nic *efx)
-{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
- XDP_PACKET_HEADROOM,
- EFX_RX_BUF_ALIGNMENT);
- efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
- ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
- efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
- efx->rx_bufs_per_page;
- efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
- efx->rx_bufs_per_page);
-}
-
-/* Check the RX page recycle ring for a page that can be reused. */
-static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct page *page;
- struct efx_rx_page_state *state;
- unsigned index;
-
- index = rx_queue->page_remove & rx_queue->page_ptr_mask;
- page = rx_queue->page_ring[index];
- if (page == NULL)
- return NULL;
-
- rx_queue->page_ring[index] = NULL;
- /* page_remove cannot exceed page_add. */
- if (rx_queue->page_remove != rx_queue->page_add)
- ++rx_queue->page_remove;
-
- /* If page_count is 1 then we hold the only reference to this page. */
- if (page_count(page) == 1) {
- ++rx_queue->page_recycle_count;
- return page;
- } else {
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- ++rx_queue->page_recycle_failed;
- }
-
- return NULL;
-}
-
-/**
- * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates a batch of pages, maps them for DMA, and populates
- * struct efx_rx_buffers for each one. Return a negative error code or
- * 0 on success. If a single page can be used for multiple buffers,
- * then the page will either be inserted fully, or not at all.
- */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
- struct page *page;
- unsigned int page_offset;
- struct efx_rx_page_state *state;
- dma_addr_t dma_addr;
- unsigned index, count;
-
- count = 0;
- do {
- page = efx_reuse_page(rx_queue);
- if (page == NULL) {
- page = alloc_pages(__GFP_COMP |
- (atomic ? GFP_ATOMIC : GFP_KERNEL),
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr =
- dma_map_page(&efx->pci_dev->dev, page, 0,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
- }
- state = page_address(page);
- state->dma_addr = dma_addr;
- } else {
- state = page_address(page);
- dma_addr = state->dma_addr;
- }
-
- dma_addr += sizeof(struct efx_rx_page_state);
- page_offset = sizeof(struct efx_rx_page_state);
-
- do {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->page = page;
- rx_buf->page_offset = page_offset + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->len = efx->rx_dma_len;
- rx_buf->flags = 0;
- ++rx_queue->added_count;
- get_page(page);
- dma_addr += efx->rx_page_buf_step;
- page_offset += efx->rx_page_buf_step;
- } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
-
- rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
- } while (++count < efx->rx_pages_per_batch);
-
- return 0;
-}
-
-/* Unmap a DMA-mapped page. This function is only called for the final RX
- * buffer in a page.
- */
-static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
-
- if (page) {
- struct efx_rx_page_state *state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- }
-}
-
-static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
-{
- do {
- if (rx_buf->page) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- }
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--num_bufs);
-}
-
-/* Attempt to recycle the page if there is an RX recycle ring; the page can
- * only be added if this is the final RX buffer, to prevent pages being used in
- * the descriptor ring and appearing in the recycle ring simultaneously.
- */
-static void efx_recycle_rx_page(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_nic *efx = rx_queue->efx;
- unsigned index;
-
- /* Only recycle the page after processing the final buffer. */
- if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
- return;
-
- index = rx_queue->page_add & rx_queue->page_ptr_mask;
- if (rx_queue->page_ring[index] == NULL) {
- unsigned read_index = rx_queue->page_remove &
- rx_queue->page_ptr_mask;
-
- /* The next slot in the recycle ring is available, but
- * increment page_remove if the read pointer currently
- * points here.
- */
- if (read_index == index)
- ++rx_queue->page_remove;
- rx_queue->page_ring[index] = page;
- ++rx_queue->page_add;
- return;
- }
- ++rx_queue->page_recycle_full;
- efx_unmap_rx_buffer(efx, rx_buf);
- put_page(rx_buf->page);
-}
-
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- /* Release the page reference we hold for the buffer. */
- if (rx_buf->page)
- put_page(rx_buf->page);
-
- /* If this is the last buffer in a page, unmap and free it. */
- if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- }
- rx_buf->page = NULL;
-}
-
-/* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- do {
- efx_recycle_rx_page(channel, rx_buf);
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--n_frags);
-}
-
-static void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
-
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@max_fill. If there is insufficient atomic
- * memory to do so, a slow fill will be scheduled.
- *
- * The caller must provide serialisation (none is used here). In practise,
- * this means this function must run from the NAPI handler, or be called
- * when NAPI is disabled.
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int fill_level, batch_size;
- int space, rc = 0;
-
- if (!rx_queue->refill_enabled)
- return;
-
- /* Calculate current fill level, and exit if we don't need to fill */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
- if (fill_level >= rx_queue->fast_fill_trigger)
- goto out;
-
- /* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill)) {
- if (fill_level)
- rx_queue->min_fill = fill_level;
- }
-
- batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- space = rx_queue->max_fill - fill_level;
- EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d\n",
- efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill);
-
-
- do {
- rc = efx_init_rx_buffers(rx_queue, atomic);
- if (unlikely(rc)) {
- /* Ensure that we don't leave the rx queue empty */
- efx_schedule_slow_fill(rx_queue);
- goto out;
- }
- } while ((space -= batch_size) >= batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filled descriptor ring "
- "to level %d\n", efx_rx_queue_index(rx_queue),
- rx_queue->added_count - rx_queue->removed_count);
-
- out:
- if (rx_queue->notified_count != rx_queue->added_count)
- efx_nic_notify_rx_desc(rx_queue);
-}
-
-void efx_rx_slow_fill(struct timer_list *t)
-{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
-
- /* Post an event to cause NAPI to run and refill the queue */
- efx_nic_generate_fill_event(rx_queue);
- ++rx_queue->slow_fill_count;
-}
-
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
@@ -412,53 +71,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through GRO. GRO can handle pages
- * regardless of checksum state and skbs with a good checksum.
- */
-static void
-efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
- unsigned int n_frags, u8 *eh)
-{
- struct napi_struct *napi = &channel->napi_str;
- struct efx_nic *efx = channel->efx;
- struct sk_buff *skb;
-
- skb = napi_get_frags(napi);
- if (unlikely(!skb)) {
- struct efx_rx_queue *rx_queue;
-
- rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
- return;
- }
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
- PKT_HASH_TYPE_L3);
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
- skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
-
- for (;;) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buf->page, rx_buf->page_offset,
- rx_buf->len);
- rx_buf->page = NULL;
- skb->len += rx_buf->len;
- if (skb_shinfo(skb)->nr_frags == n_frags)
- break;
-
- rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
- }
-
- skb->data_len = skb->len;
- skb->truesize += n_frags * efx->rx_buffer_truesize;
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- napi_gro_frags(napi);
-}
-
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -805,174 +417,6 @@ out:
channel->rx_pkt_n_frags = 0;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- rx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d size %#x mask %#x\n",
- efx_rx_queue_index(rx_queue), efx->rxq_entries,
- rx_queue->ptr_mask);
-
- /* Allocate RX buffers */
- rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
- GFP_KERNEL);
- if (!rx_queue->buffer)
- return -ENOMEM;
-
- rc = efx_nic_probe_rx(rx_queue);
- if (rc) {
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- }
-
- return rc;
-}
-
-static void efx_init_rx_recycle_ring(struct efx_nic *efx,
- struct efx_rx_queue *rx_queue)
-{
- unsigned int bufs_in_recycle_ring, page_ring_size;
-
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
- page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
- efx->rx_bufs_per_page);
- rx_queue->page_ring = kcalloc(page_ring_size,
- sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
-}
-
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int max_fill, trigger, max_trigger;
- int rc = 0;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- /* Initialise ptr fields */
- rx_queue->added_count = 0;
- rx_queue->notified_count = 0;
- rx_queue->removed_count = 0;
- rx_queue->min_fill = -1U;
- efx_init_rx_recycle_ring(efx, rx_queue);
-
- rx_queue->page_remove = 0;
- rx_queue->page_add = rx_queue->page_ptr_mask + 1;
- rx_queue->page_recycle_count = 0;
- rx_queue->page_recycle_failed = 0;
- rx_queue->page_recycle_full = 0;
-
- /* Initialise limit fields */
- max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger =
- max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- if (rx_refill_threshold != 0) {
- trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
- if (trigger > max_trigger)
- trigger = max_trigger;
- } else {
- trigger = max_trigger;
- }
-
- rx_queue->max_fill = max_fill;
- rx_queue->fast_fill_trigger = trigger;
- rx_queue->refill_enabled = true;
-
- /* Initialise XDP queue information */
- rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
- rx_queue->core_index);
-
- if (rc) {
- netif_err(efx, rx_err, efx->net_dev,
- "Failure to initialise XDP queue information rc=%d\n",
- rc);
- efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
- }
-
- /* Set up RX descriptor ring */
- efx_nic_init_rx(rx_queue);
-}
-
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
-{
- int i;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- del_timer_sync(&rx_queue->slow_fill);
-
- /* Release RX buffers from the current read ptr to the write ptr */
- if (rx_queue->buffer) {
- for (i = rx_queue->removed_count; i < rx_queue->added_count;
- i++) {
- unsigned index = i & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- efx_fini_rx_buffer(rx_queue, rx_buf);
- }
- }
-
- /* Unmap and release the pages in the recycle ring. Remove the ring. */
- for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
- struct page *page = rx_queue->page_ring[i];
- struct efx_rx_page_state *state;
-
- if (page == NULL)
- continue;
-
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- }
- kfree(rx_queue->page_ring);
- rx_queue->page_ring = NULL;
-
- if (rx_queue->xdp_rxq_info_valid)
- xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
-}
-
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
-{
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- efx_nic_remove_rx(rx_queue);
-
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
-}
-
-
-module_param(rx_refill_threshold, uint, 0444);
-MODULE_PARM_DESC(rx_refill_threshold,
- "RX descriptor ring refill threshold (%)");
-
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_work(struct work_struct *data)
@@ -1206,37 +650,3 @@ bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
}
#endif /* CONFIG_RFS_ACCEL */
-
-/**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
- * @spec: Specification to test
- *
- * Return: %true if the specification is a non-drop RX filter that
- * matches a local MAC address I/G bit value of 1 or matches a local
- * IPv4 or IPv6 address value in the respective multicast address
- * range. Otherwise %false.
- */
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
-{
- if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
- return false;
-
- if (spec->match_flags &
- (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
- is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] == 0xff)
- return true;
- }
-
- return false;
-}
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
new file mode 100644
index 000000000000..ee8beb87bdc1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ unsigned int index;
+ struct page *page;
+
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ struct page *page = rx_buf->page;
+ unsigned int index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned int read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (iommu_present(&pci_bus_type))
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
+static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ int i;
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ unsigned int max_fill, trigger, max_trigger;
+ struct efx_nic *efx = rx_queue->efx;
+ int rc = 0;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ int i;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned int index = i & rx_queue->ptr_mask;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ efx_fini_rx_recycle_ring(rx_queue);
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+void efx_rx_slow_fill(struct timer_list *t)
+{
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ unsigned int page_offset, index, count;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ struct efx_rx_buffer *rx_buf;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
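+/* Recalculate the page-split geometry from the current DMA length: the
+ * stride between buffers within a page, the number of buffers carved from
+ * each page, their truesize, and how many pages make up one refill batch.
+ */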
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
+
+/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ napi_gro_frags(napi);
+}
+
+/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
+ * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
+ */
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx, *new;
+ u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ /* Search for first gap in the numbering */
+ list_for_each_entry(ctx, head, list) {
+ if (ctx->user_id != id)
+ break;
+ id++;
+ /* Check for wrap. If this happens, we have nearly 2^32
+ * allocated RSS contexts, which seems unlikely.
+ */
+ if (WARN_ON_ONCE(!id))
+ return NULL;
+ }
+
+ /* Create the new entry */
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+ new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ new->rx_hash_udp_4tuple = false;
+
+ /* Insert the new entry into the gap */
+ new->user_id = id;
+ list_add_tail(&new->list, &ctx->list);
+ return new;
+}
+
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ list_for_each_entry(ctx, head, list)
+ if (ctx->user_id == id)
+ return ctx;
+ return NULL;
+}
+
+void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+{
+ list_del(&ctx->list);
+ kfree(ctx);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
+
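+/* Two filter specs are equal when the match flags, the RX/TX direction
+ * flags and every match-value field agree. The value fields are laid out
+ * contiguously from outer_vid to the end of the structure, so a single
+ * memcmp covers them all.
+ */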
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
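+/* Decide whether the expiry scan may remove the filter backing @rule.
+ * Returns true if removal should proceed; *force is set when the rule's
+ * bookkeeping is already stale and removal must not be vetoed.
+ */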
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+static
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_filter_spec_hash(spec);
+
+ lockdep_assert_held(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone already reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because the caller holds a lock on the filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
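+ /* For ARFS, each channel needs a table mapping filter index to RPS
+ * flow ID, with every entry initialised to "invalid".
+ */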
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+#endif
+out_unlock:
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
+ kfree(channel->rps_flow_id);
+ }
+#endif
+ down_write(&efx->filter_sem);
+ efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
new file mode 100644
index 000000000000..c41f12a89477
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void efx_rx_slow_fill(struct timer_list *t);
+
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct page *page,
+ unsigned int page_offset,
+ u16 flags);
+void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_rx_config_page_split(struct efx_nic *efx);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh);
+
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
+void efx_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force);
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new);
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
+int efx_probe_filters(struct efx_nic *efx);
+void efx_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 8474cf8ea7d3..1ae369022d7d 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -783,7 +785,7 @@ void efx_selftest_async_cancel(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->selftest_work);
}
-void efx_selftest_async_work(struct work_struct *data)
+static void efx_selftest_async_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
selftest_work.work);
@@ -802,3 +804,8 @@ void efx_selftest_async_work(struct work_struct *data)
channel->channel, cpu);
}
}
+
+void efx_selftest_async_init(struct efx_nic *efx)
+{
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a3553816d92c..ca88ebb4f6b1 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -45,8 +45,8 @@ void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags);
+void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
-void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 81499244a4b4..baa464161626 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -14,12 +14,14 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "efx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index dfbdf05dcf79..83dcfcae3d4b 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 00c1c4402451..04d7f41d7ed9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -20,6 +20,7 @@
#include "io.h"
#include "nic.h"
#include "tx.h"
+#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -56,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
return efx_tx_get_copy_buffer(tx_queue, buffer);
}
-static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- if (buffer->unmap_len) {
- struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
- if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- buffer->unmap_len = 0;
- }
-
- if (buffer->flags & EFX_TX_BUF_SKB) {
- struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
- if (tx_queue->timestamping &&
- (tx_queue->completed_timestamp_major ||
- tx_queue->completed_timestamp_minor)) {
- struct skb_shared_hwtstamps hwtstamp;
-
- hwtstamp.hwtstamp =
- efx_ptp_nic_to_kernel_time(tx_queue);
- skb_tstamp_tx(skb, &hwtstamp);
-
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
- }
- dev_consume_skb_any((struct sk_buff *)buffer->skb);
- netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
- "TX queue %d transmission id %x complete\n",
- tx_queue->queue, tx_queue->read_count);
- } else if (buffer->flags & EFX_TX_BUF_XDP) {
- xdp_return_frame_rx_napi(buffer->xdpf);
- }
-
- buffer->len = 0;
- buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
- /* Header and payload descriptor for each output segment, plus
- * one for every input fragment boundary within a segment
- */
- unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
- /* Possibly one more per segment for option descriptors */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
- max_descs += EFX_TSO_MAX_SEGS;
-
- /* Possibly more for PCIe page boundaries within input fragments */
- if (PAGE_SIZE > EFX_PAGE_SIZE)
- max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
- return max_descs;
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
@@ -333,125 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
-static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr,
- size_t len)
-{
- const struct efx_nic_type *nic_type = tx_queue->efx->type;
- struct efx_tx_buffer *buffer;
- unsigned int dma_len;
-
- /* Map the fragment taking account of NIC-dependent DMA limits. */
- do {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
- buffer->len = dma_len;
- buffer->dma_addr = dma_addr;
- buffer->flags = EFX_TX_BUF_CONT;
- len -= dma_len;
- dma_addr += dma_len;
- ++tx_queue->insert_count;
- } while (len);
-
- return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
- */
-static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count)
-{
- struct efx_nic *efx = tx_queue->efx;
- struct device *dma_dev = &efx->pci_dev->dev;
- unsigned int frag_index, nr_frags;
- dma_addr_t dma_addr, unmap_addr;
- unsigned short dma_flags;
- size_t len, unmap_len;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- frag_index = 0;
-
- /* Map header data. */
- len = skb_headlen(skb);
- dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
- dma_flags = EFX_TX_BUF_MAP_SINGLE;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
-
- if (segment_count) {
- /* For TSO we need to put the header in to a separate
- * descriptor. Map this separately if necessary.
- */
- size_t header_len = skb_transport_header(skb) - skb->data +
- (tcp_hdr(skb)->doff << 2u);
-
- if (header_len != len) {
- tx_queue->tso_long_headers++;
- efx_tx_map_chunk(tx_queue, dma_addr, header_len);
- len -= header_len;
- dma_addr += header_len;
- }
- }
-
- /* Add descriptors for each fragment. */
- do {
- struct efx_tx_buffer *buffer;
- skb_frag_t *fragment;
-
- buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
- /* The final descriptor for a fragment is responsible for
- * unmapping the whole fragment.
- */
- buffer->flags = EFX_TX_BUF_CONT | dma_flags;
- buffer->unmap_len = unmap_len;
- buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
- if (frag_index >= nr_frags) {
- /* Store SKB details with the final buffer for
- * the completion.
- */
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB | dma_flags;
- return 0;
- }
-
- /* Move on to the next fragment. */
- fragment = &skb_shinfo(skb)->frags[frag_index++];
- len = skb_frag_size(fragment);
- dma_addr = skb_frag_dma_map(dma_dev, fragment,
- 0, len, DMA_TO_DEVICE);
- dma_flags = 0;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
- } while (1);
-}
-
-/* Remove buffers put into a tx_queue for the current packet.
- * None of the buffers must have an skb attached.
- */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count)
-{
- struct efx_tx_buffer *buffer;
- unsigned int bytes_compl = 0;
- unsigned int pkts_compl = 0;
-
- /* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != insert_count) {
- --tx_queue->insert_count;
- buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- }
-}
-
/*
* Fallback to software TSO.
*
@@ -473,12 +289,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
dev_consume_skb_any(skb);
skb = segments;
- while (skb) {
- next = skb->next;
- skb->next = NULL;
-
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
- skb = next;
}
return 0;
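
skb_list_walk_safe() samples the next pointer before each iteration runs, so the loop body may unlink or consume the current skb — exactly what the open-coded walk above did by hand. A minimal sketch of the pattern, with a hypothetical per-segment handler:

	static void consume_segments(struct sk_buff *segments)
	{
		struct sk_buff *skb, *next;

		skb_list_walk_safe(segments, skb, next) {
			skb_mark_not_on_list(skb);	/* detach from the GSO list */
			consume_one(skb);		/* hypothetical handler */
		}
	}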
@@ -687,41 +500,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
return i;
}
-/* Remove packets from the TX queue
- *
- * This removes packets from the TX queue, up to and including the
- * specified index.
- */
-static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int stop_index, read_ptr;
-
- stop_index = (index + 1) & tx_queue->ptr_mask;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-
- while (read_ptr != stop_index) {
- struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
- tx_queue->queue, read_ptr);
- efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
- return;
- }
-
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-
- ++tx_queue->read_count;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
- }
-}
-
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
@@ -834,173 +612,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
net_dev->num_tc = num_tc;
return 0;
}
-
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- unsigned fill_level;
- struct efx_nic *efx = tx_queue->efx;
- struct efx_tx_queue *txq2;
- unsigned int pkts_compl = 0, bytes_compl = 0;
-
- EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
-
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- tx_queue->pkts_compl += pkts_compl;
- tx_queue->bytes_compl += bytes_compl;
-
- if (pkts_compl > 1)
- ++tx_queue->merge_events;
-
- /* See if we need to restart the netif queue. This memory
- * barrier ensures that we write read_count (inside
- * efx_dequeue_buffers()) before reading the queue status.
- */
- smp_mb();
- if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled) &&
- likely(netif_device_present(efx->net_dev))) {
- txq2 = efx_tx_queue_partner(tx_queue);
- fill_level = max(tx_queue->insert_count - tx_queue->read_count,
- txq2->insert_count - txq2->read_count);
- if (fill_level <= efx->txq_wake_thresh)
- netif_tx_wake_queue(tx_queue->core_txq);
- }
-
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
-}
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
- return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- tx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating TX queue %d size %#x mask %#x\n",
- tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
- /* Allocate software ring */
- tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
- GFP_KERNEL);
- if (!tx_queue->buffer)
- return -ENOMEM;
-
- tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
- sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
- if (!tx_queue->cb_page) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- /* Allocate hardware ring */
- rc = efx_nic_probe_tx(tx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
-fail2:
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
-fail1:
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
- return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
-
- netif_dbg(efx, drv, efx->net_dev,
- "initialising TX queue %d\n", tx_queue->queue);
-
- tx_queue->insert_count = 0;
- tx_queue->write_count = 0;
- tx_queue->packet_write_count = 0;
- tx_queue->old_write_count = 0;
- tx_queue->read_count = 0;
- tx_queue->old_read_count = 0;
- tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
- tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
- tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
-
- tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
-
- /* Set up TX descriptor ring */
- efx_nic_init_tx(tx_queue);
-
- tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_buffer *buffer;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- if (!tx_queue->buffer)
- return;
-
- /* Free any buffers left in the ring */
- while (tx_queue->read_count != tx_queue->write_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- }
- tx_queue->xmit_more_available = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
- int i;
-
- if (!tx_queue->buffer)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "destroying TX queue %d\n", tx_queue->queue);
- efx_nic_remove_tx(tx_queue);
-
- if (tx_queue->cb_page) {
- for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
- efx_nic_free_buffer(tx_queue->efx,
- &tx_queue->cb_page[i]);
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
- }
-
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
-}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
new file mode 100644
index 000000000000..b1571e9789d0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+ PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
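
The ring is sized to a power of two so that the free-running insert/read counters reduce to ring indices with a single mask rather than a modulo. A small userspace illustration of the scheme, with assumed values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int entries = 512;		/* e.g. roundup_pow_of_two(300) */
		unsigned int ptr_mask = entries - 1;	/* 0x1ff */
		unsigned int read_count = 1000;		/* free-running, never wrapped */

		printf("read_ptr = %u\n", read_count & ptr_mask);	/* prints 488 */
		return 0;
	}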
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
+ /* Set up default function pointers. These may get replaced by
+ * efx_nic_init_tx() based off NIC/queue capabilities.
+ */
+ tx_queue->handle_tso = efx_enqueue_skb_tso;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_more_available = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_tx_queue *txq2;
+
+ EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ txq2 = efx_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
+ */
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
+{
+ struct efx_tx_buffer *buffer;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != insert_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ }
+}
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ if (segment_count) {
+ /* For TSO we need to put the header into a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
+ }
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
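
As a worked example — assuming the usual definitions EFX_TSO_MAX_SEGS == 100, MAX_SKB_FRAGS == 17 and EFX_PAGE_SIZE == 4096 — the baseline is 100 * 2 + 17 = 217 descriptors, EF10 (Hunt and later) adds up to one option descriptor per segment for a worst case of 317, and the final term only contributes on architectures whose PAGE_SIZE exceeds EFX_PAGE_SIZE.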
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
new file mode 100644
index 000000000000..f92f1fe3a87f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count);
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index d242906ae233..06637b03deed 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -114,7 +114,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ioc3_timeout(struct net_device *dev);
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1479,7 +1479,7 @@ drop_packet:
return NETDEV_TX_OK;
}
-static void ioc3_timeout(struct net_device *dev)
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ioc3_private *ip = netdev_priv(dev);
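
This is one instance of a tree-wide change: .ndo_tx_timeout now receives the index of the queue whose watchdog fired. A minimal sketch of the updated contract, with hypothetical driver names:

	static void mydrv_tx_timeout(struct net_device *dev, unsigned int txqueue)
	{
		netdev_err(dev, "TX watchdog fired on queue %u\n", txqueue);
		/* single-queue drivers, like the ones below, can ignore txqueue */
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		.ndo_tx_timeout	= mydrv_tx_timeout,
		/* ... */
	};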
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 539bc5db989c..0c396ecd3389 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -90,7 +90,7 @@ struct meth_private {
spinlock_t meth_lock;
};
-static void meth_tx_timeout(struct net_device *dev);
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t meth_interrupt(int irq, void *dev_id);
/* global, initialized in ip32-setup.c */
@@ -727,7 +727,7 @@ static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
/*
* Deal with a transmit timeout.
*/
-static void meth_tx_timeout(struct net_device *dev)
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c7641a236eb8..cb043eb1bdc1 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1078,7 +1078,7 @@ static void sc92031_set_multicast_list(struct net_device *dev)
spin_unlock_bh(&priv->lock);
}
-static void sc92031_tx_timeout(struct net_device *dev)
+static void sc92031_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sc92031_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 5b351beb78cb..5a4b6e3ab38f 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1538,7 +1538,7 @@ err_out_0:
goto out;
}
-static void sis190_tx_timeout(struct net_device *dev)
+static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 85eaccbbbac1..81ed7589e33c 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -222,7 +222,7 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
-static void sis900_tx_timeout(struct net_device *net_dev);
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
static void sis900_init_tx_ring(struct net_device *net_dev);
static void sis900_init_rx_ring(struct net_device *net_dev);
static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
@@ -1537,7 +1537,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
* disable interrupts and do some tasks
*/
-static void sis900_tx_timeout(struct net_device *net_dev)
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index be47d864f8b9..61ddee0c2a2e 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -280,6 +280,7 @@ struct epic_private {
signed char phys[4]; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
int mii_phy_cnt;
+ u32 ethtool_ops_nesting;
struct mii_if_info mii;
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int default_port:4; /* Last dev->if_port value. */
@@ -291,7 +292,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
-static void epic_tx_timeout(struct net_device *dev);
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -861,7 +862,7 @@ static void epic_timer(struct timer_list *t)
add_timer(&ep->timer);
}
-static void epic_tx_timeout(struct net_device *dev)
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
@@ -1435,8 +1436,10 @@ static int ethtool_begin(struct net_device *dev)
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
+ if (ep->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
/* power-up, if interface is down */
- if (!netif_running(dev)) {
+ if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
ew32(GENCTL, 0x0200);
ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
}
@@ -1449,7 +1452,7 @@ static void ethtool_complete(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
/* power-down, if interface is down */
- if (!netif_running(dev)) {
+ if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
ew32(GENCTL, 0x0008);
ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
}
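
The new counter makes begin/complete nest safely: power is applied on the first begin() and removed only by the matching last complete(), so a nested ethtool operation can no longer power the device down underneath an outer one. The pattern, reduced to a hedged sketch with hypothetical names:

	static int my_begin(struct my_priv *p)
	{
		if (p->nesting == U32_MAX)
			return -EBUSY;			/* refuse counter overflow */
		if (!p->nesting++ && !p->if_running)
			my_power_up(p);
		return 0;
	}

	static void my_complete(struct my_priv *p)
	{
		if (!--p->nesting && !p->if_running)
			my_power_down(p);
	}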
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d88e4083456..186c0bddbe5f 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -936,7 +936,7 @@ static void smc911x_phy_configure(struct work_struct *work)
if (lp->ctl_rspeed != 100)
my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
- if (!lp->ctl_rfduplx)
+ if (!lp->ctl_rfduplx)
my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
/* Update our Auto-Neg Advertisement Register */
@@ -1245,7 +1245,7 @@ static void smc911x_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc911x_timeout(struct net_device *dev)
+static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc911x_local *lp = netdev_priv(dev);
int status, mask;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index d3bb2ba51f40..4b2330deed47 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -216,7 +216,7 @@ static int smc_open(struct net_device *dev);
/*
. Our watchdog timed out. Called by the networking layer
*/
-static void smc_timeout(struct net_device *dev);
+static void smc_timeout(struct net_device *dev, unsigned int txqueue);
/*
. This is called by the kernel in response to 'ifconfig ethX down'. It
@@ -1094,7 +1094,7 @@ static int smc_open(struct net_device *dev)
.--------------------------------------------------------
*/
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index a55f430f6a7b..f2a50eb3c1e0 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -271,7 +271,7 @@ static void smc91c92_release(struct pcmcia_device *link);
static int smc_open(struct net_device *dev);
static int smc_close(struct net_device *dev);
static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void smc_tx_timeout(struct net_device *dev);
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t smc_interrupt(int irq, void *dev_id);
@@ -1178,7 +1178,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
/*====================================================================*/
-static void smc_tx_timeout(struct net_device *dev)
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 3a6761131f4c..90410f9d3b1a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1321,7 +1321,7 @@ static void smc_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 38068fc34141..49a6a9167af4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1943,15 +1943,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-/* Standard ioctls for mii-tool */
-static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -2151,7 +2142,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
- .ndo_do_ioctl = smsc911x_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2454,7 +2445,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- pdata->ioaddr = ioremap_nocache(res->start, res_size);
+ pdata->ioaddr = ioremap(res->start, res_size);
if (!pdata->ioaddr) {
retval = -ENOMEM;
goto out_ioremap_fail;
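
The per-driver MII ioctl wrappers removed here (and in smsc9420 below) are replaced by phylib's generic helper. Its expected semantics, roughly — the canonical implementation lives in drivers/net/phy/ and may differ in detail:

	static int phy_do_ioctl_running_sketch(struct net_device *dev,
					       struct ifreq *ifr, int cmd)
	{
		if (!netif_running(dev) || !dev->phydev)
			return -ENODEV;

		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	}

Note the error code shifts from the drivers' -EINVAL to a phylib-chosen value; callers that depend on the exact errno should double-check.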
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index a6962a41c3d2..7312e522c022 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -210,15 +210,6 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
return -EIO;
}
-/* Standard ioctls for mii-tool */
-static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -1504,7 +1495,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
.ndo_set_rx_mode = smsc9420_set_multicast_list,
- .ndo_do_ioctl = smsc9420_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 869a498e3b5e..e8224b543dfc 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -243,6 +243,7 @@
NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ sizeof(struct netsec_de)
@@ -719,7 +720,6 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- enum dma_data_direction dma_dir;
struct page *page;
page = page_pool_dev_alloc_pages(dring->page_pool);
@@ -734,9 +734,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
/* Make sure the incoming payload fits in the page for XDP and non-XDP
* cases and reserve enough space for headroom + skb_shared_info
*/
- *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
- dma_dir = page_pool_get_dma_dir(dring->page_pool);
- dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
+ *desc_len = NETSEC_RX_BUF_SIZE;
return page_address(page);
}
@@ -883,6 +881,8 @@ static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ unsigned int len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
int err;
u32 act;
@@ -896,7 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -904,7 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
}
break;
default:
@@ -915,7 +919,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
}
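
Every non-forwarded XDP verdict now returns the backing page directly to its page pool, passing the frame length so only the bytes the NIC actually wrote are synced on recycle. The repeated call condenses to a helper like:

	/* Hedged sketch; mirrors the __page_pool_put_page() calls above. */
	static void netsec_xdp_drop(struct page_pool *pool, struct xdp_buff *xdp)
	{
		unsigned int len = xdp->data_end - xdp->data;

		__page_pool_put_page(pool, virt_to_head_page(xdp->data), len, true);
	}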
@@ -929,7 +935,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
- struct sk_buff *skb = NULL;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
@@ -943,7 +948,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = XDP_PASS;
+ u32 xdp_result = NETSEC_XDP_PASS;
+ struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
struct xdp_buff xdp;
@@ -1014,7 +1020,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- page_pool_recycle_direct(dring->page_pool, page);
+ __page_pool_put_page(dring->page_pool, page,
+ pkt_len, true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1272,17 +1279,19 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
- struct page_pool_params pp_params = { 0 };
+ struct page_pool_params pp_params = {
+ .order = 0,
+ /* internal DMA mapping in page_pool */
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = DESC_NUM,
+ .nid = NUMA_NO_NODE,
+ .dev = priv->dev,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = NETSEC_RXBUF_HEADROOM,
+ .max_len = NETSEC_RX_BUF_SIZE,
+ };
int i, err;
- pp_params.order = 0;
- /* internal DMA mapping in page_pool */
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DESC_NUM;
- pp_params.nid = cpu_to_node(0);
- pp_params.dev = priv->dev;
- pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
dring->page_pool = page_pool_create(&pp_params);
if (IS_ERR(dring->page_pool)) {
err = PTR_ERR(dring->page_pool);
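
With PP_FLAG_DMA_SYNC_DEV set, the pool itself performs the sync-for-device on allocation and recycle, bounded by .offset/.max_len — which is what let the explicit dma_sync_single_for_device() call be dropped from netsec_alloc_rx_data() above. A condensed sketch of such a pool, with assumed sizes standing in for the netsec values:

	static struct page_pool *rx_pool_create(struct device *dev, bool has_xdp)
	{
		struct page_pool_params pp = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.pool_size	= 256,			/* RX ring depth, assumed */
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= has_xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
			.offset		= XDP_PACKET_HEADROOM,	/* assumed headroom */
			.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
		};

		return page_pool_create(&pp);
	}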
@@ -1731,12 +1740,6 @@ static int netsec_netdev_set_features(struct net_device *ndev,
return 0;
}
-static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
- int cmd)
-{
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static int netsec_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -1821,7 +1824,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_features = netsec_netdev_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = netsec_netdev_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_xdp_xmit = netsec_xdp_xmit,
.ndo_bpf = netsec_xdp,
};
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f7e927ad67fa..b7032422393f 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev,
phy_ethtool_get_wol(ndev->phydev, wol);
}
-static int ave_ethtool_set_wol(struct net_device *ndev,
- struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
{
- int ret;
-
if (!ndev->phydev ||
(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ ret = __ave_ethtool_set_wol(ndev, wol);
if (!ret)
device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
@@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev)
/* set wol initial state disabled */
wol.wolopts = 0;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
@@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev)
ave_ethtool_get_wol(ndev, &wol);
wol.wolopts = priv->wolopts;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (ndev->phydev) {
ret = phy_resume(ndev->phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 94f94686cf7d..487099092693 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -363,6 +363,12 @@ struct dma_features {
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
+ /* TSN Features */
+ unsigned int estwid;
+ unsigned int estdep;
+ unsigned int estsel;
+ unsigned int fpesel;
+ unsigned int tbssel;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9f0b9a9e63b3..49d6a866244f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -171,6 +171,15 @@ struct dma_extended_desc {
__le32 des7; /* Tx/Rx Timestamp High */
};
+/* Enhanced descriptor for TBS */
+struct dma_edesc {
+ __le32 des4;
+ __le32 des5;
+ __le32 des6;
+ __le32 des7;
+ struct dma_desc basic;
+};
+
/* Transmit checksum insertion control */
#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd9967aeda22..2342d497348e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -40,7 +40,7 @@ struct tegra_eqos {
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
u32 burst_map = 0;
u32 bit_index = 0;
u32 a_index = 0;
@@ -52,9 +52,10 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return -ENOMEM;
}
- plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
- if (of_property_read_u32(np, "snps,write-requests",
- &plat_dat->axi->axi_wr_osr_lmt)) {
+ plat_dat->axi->axi_lpi_en = device_property_read_bool(dev,
+ "snps,en-lpi");
+ if (device_property_read_u32(dev, "snps,write-requests",
+ &plat_dat->axi->axi_wr_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -68,8 +69,8 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
- if (of_property_read_u32(np, "snps,read-requests",
- &plat_dat->axi->axi_rd_osr_lmt)) {
+ if (device_property_read_u32(dev, "snps,read-requests",
+ &plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -82,7 +83,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
*/
plat_dat->axi->axi_rd_osr_lmt--;
}
- of_property_read_u32(np, "snps,burst-map", &burst_map);
+ device_property_read_u32(dev, "snps,burst-map", &burst_map);
/* converts burst-map bitmask to burst array */
for (bit_index = 0; bit_index < 7; bit_index++) {
@@ -270,6 +271,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *data,
struct stmmac_resources *res)
{
+ struct device *dev = &pdev->dev;
struct tegra_eqos *eqos;
int err;
@@ -282,6 +284,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ if (!is_of_node(dev->fwnode))
+ goto bypass_clk_reset_gpio;
+
eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
if (IS_ERR(eqos->clk_master)) {
err = PTR_ERR(eqos->clk_master);
@@ -354,6 +359,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
+bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
@@ -421,7 +427,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
void *priv;
int ret;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
@@ -478,7 +484,7 @@ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
int err;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
err = stmmac_dvr_remove(&pdev->dev);
if (err < 0)
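
Switching from of_property_*()/of_device_get_match_data() to the fwnode-agnostic device_property_*()/device_get_match_data() lets this glue driver probe from ACPI as well as DT; the is_of_node(dev->fwnode) test added above then bypasses the DT-only clock/reset/GPIO lookups. The pattern, as a hedged sketch:

	static void read_axi_props(struct device *dev, u32 *wr_osr_lmt)
	{
		if (device_property_read_u32(dev, "snps,write-requests", wr_osr_lmt))
			*wr_osr_lmt = 1;	/* register reset value if absent */

		if (!is_of_node(dev->fwnode))
			return;			/* ACPI: skip DT-only resources */

		/* DT-only clk/reset/GPIO lookups would follow here */
	}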
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index bdb80421acac..9e4b83832938 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -55,6 +55,8 @@ struct mediatek_dwmac_plat_data {
struct regmap *peri_regmap;
struct device *dev;
phy_interface_t phy_mode;
+ int num_clks_to_config;
+ bool rmii_clk_from_mac;
bool rmii_rxc;
};
@@ -73,21 +75,33 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref"
+ "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
+ /* The clock labeled "rmii_internal" in mt2712_dwmac_clk_l is needed
+ * only in RMII mode when the MAC provides the reference clock, and is
+ * unused for RGMII/MII and for RMII when the PHY provides the clock.
+ * num_clks_to_config is the number of clocks that actually need to be
+ * configured: (plat->variant->num_clks - 1) by default, plus one in
+ * the rmii_clk_from_mac case.
+ */
+ plat->num_clks_to_config = plat->variant->num_clks - 1;
+
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- intf_val |= (PHY_INTF_RMII | rmii_rxc);
+ if (plat->rmii_clk_from_mac)
+ plat->num_clks_to_config++;
+ intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -173,35 +187,50 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
break;
case PHY_INTERFACE_MODE_RMII:
- /* the rmii reference clock is from external phy,
- * and the property "rmii_rxc" indicates which pin(TXC/RXC)
- * the reference clk is connected to. The reference clock is a
- * received signal, so rx_delay/rx_inv are used to indicate
- * the reference clock timing adjustment
- */
- if (plat->rmii_rxc) {
- /* the rmii reference clock from outside is connected
- * to RXC pin, the reference clock will be adjusted
- * by RXC delay macro circuit.
- */
- delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
- } else {
- /* the rmii reference clock from outside is connected
- * to TXC pin, the reference clock will be adjusted
- * by TXC delay macro circuit.
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: the mac provides the rmii reference clock, and the
+ * clock is output on the TXC pin.
+ * The egress timing can be adjusted by the GTXC delay macro circuit.
+ * The ingress timing can be adjusted by the TXC delay macro circuit.
+ */
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+ } else {
+ /* case 2: the rmii reference clock comes from the external phy,
+ * and the property "rmii_rxc" indicates which pin (TXC/RXC) the
+ * reference clock is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv are used to adjust its
+ * timing.
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+ /* tx_inv will invert the tx clock inside the mac relative to the
+ * reference clock from the external phy; this bit is located in
+ * the same register as the fine-tune bits.
+ */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
}
- /* tx_inv will inverse the tx clock inside mac relateive to
- * reference clock from external phy,
- * and this bit is located in the same register with fine-tune
- */
- if (mac_delay->tx_inv)
- fine_val = ETH_RMII_DLY_TX_INV;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -278,6 +307,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+ plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
return 0;
}
@@ -294,6 +324,8 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
for (i = 0; i < num; i++)
plat->clks[i].id = variant->clk_list[i];
+ plat->num_clks_to_config = variant->num_clks;
+
return devm_clk_bulk_get(plat->dev, num, plat->clks);
}
@@ -321,7 +353,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
if (ret) {
dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
return ret;
@@ -336,9 +368,8 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
- const struct mediatek_dwmac_variant *variant = plat->variant;
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 01b484cb177e..58e0511badba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -335,14 +335,30 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
}
}
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value |= EMAC_RX_INT;
+ if (tx)
+ value |= EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value &= ~EMAC_RX_INT;
+ if (tx)
+ value &= ~EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 2dc70d104161..af50af27550b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -64,6 +64,8 @@
#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
#define GMAC_RXQCTRL_TACPQE BIT(21)
#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
+#define GMAC_RXQCTRL_FPRQ_SHIFT 24
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
@@ -176,6 +178,8 @@ enum power_event {
#define GMAC_CONFIG_SARC GENMASK(30, 28)
#define GMAC_CONFIG_SARC_SHIFT 28
#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_IPG GENMASK(26, 24)
+#define GMAC_CONFIG_IPG_SHIFT 24
#define GMAC_CONFIG_2K BIT(22)
#define GMAC_CONFIG_ACS BIT(20)
#define GMAC_CONFIG_BE BIT(18)
@@ -183,6 +187,7 @@ enum power_event {
#define GMAC_CONFIG_JE BIT(16)
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_FES_SHIFT 14
#define GMAC_CONFIG_DM BIT(13)
#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
@@ -190,6 +195,9 @@ enum power_event {
#define GMAC_CONFIG_RE BIT(0)
/* MAC extended config */
+#define GMAC_CONFIG_EIPG GENMASK(29, 25)
+#define GMAC_CONFIG_EIPG_SHIFT 25
+#define GMAC_CONFIG_EIPG_EN BIT(24)
#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
#define GMAC_CONFIG_HDSMS_SHIFT 20
#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
@@ -231,6 +239,11 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_TBSSEL BIT(27)
+#define GMAC_HW_FEAT_FPESEL BIT(26)
+#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20)
+#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17)
+#define GMAC_HW_FEAT_ESTSEL BIT(16)
#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
#define GMAC_HW_FEAT_FRPSEL BIT(10)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 40ca00e596dd..f0c0ea616032 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -984,6 +984,8 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1027,6 +1029,8 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 3e14da69f378..eff82065a501 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -10,6 +10,7 @@
#include <linux/stmmac.h>
#include "common.h"
+#include "dwmac4.h"
#include "dwmac4_descs.h"
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
@@ -505,6 +506,14 @@ static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
}
+static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
+ p->des5 = cpu_to_le32(nsec & TDES5_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -534,6 +543,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_vlan = dwmac4_set_vlan,
.get_rx_header_len = dwmac4_get_rx_header_len,
.set_sec_addr = dwmac4_set_sec_addr,
+ .set_tbs = dwmac4_set_tbs,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 6d92109dc9aa..6da070ccd737 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -73,6 +73,13 @@
#define TDES3_CONTEXT_TYPE BIT(30)
#define TDES3_CONTEXT_TYPE_SHIFT 30
+/* TDES4 */
+#define TDES4_LTV BIT(31)
+#define TDES4_LT GENMASK(7, 0)
+
+/* TDES5 */
+#define TDES5_LT GENMASK(31, 8)
+
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
#define TDES3_OWN_SHIFT 31
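Given the TDES4/TDES5 widths just defined, the launch time carried in the enhanced descriptor is deliberately lossy: dwmac4_set_tbs() above keeps only 8 bits of seconds (the value wraps every 256 s against the PTP clock) and masks the nanosecond word with GENMASK(31, 8) without shifting, so the low eight bits are dropped and the effective granularity is about 256 ns. A self-contained illustration of that packing, under exactly those assumptions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sec = 5, nsec = 100000;	/* launch at t = 5.000100000 s */
	uint32_t des4 = (sec & 0xff) | (1u << 31);	/* TDES4_LT | TDES4_LTV */
	uint32_t des5 = nsec & 0xffffff00u;		/* TDES5_LT, ~256 ns steps */

	/* Prints des4=80000005 des5=00018600; the low 8 ns bits are lost. */
	printf("des4=%08x des5=%08x\n", des4, des5);
	return 0;
}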
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index c15409030710..bb29bfcd62c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -404,6 +404,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
+ dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
+ dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
@@ -467,6 +472,25 @@ static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
+static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ if (en)
+ value |= DMA_CONTROL_EDSE;
+ else
+ value &= ~DMA_CONTROL_EDSE;
+
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
+ return 0;
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -523,4 +547,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
+ .enable_tbs = dwmac4_enable_tbs,
};
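Note the shape of dwmac4_enable_tbs() above: it sets EDSE, reads the register back, and returns -EIO if the bit did not stick, which typically means the core was synthesized without TBS; the stmmac_open() hunk later in this diff relies on exactly that return to clear STMMAC_TBS_AVAIL per queue. A hedged, generic sketch of the write-then-verify idiom (the register and FEAT_EN are placeholders standing in for DMA_CHAN_TX_CONTROL and DMA_CONTROL_EDSE):

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/errno.h>

#define FEAT_EN	BIT(28)		/* placeholder feature-enable bit */

static int enable_feature(void __iomem *reg, bool en)
{
	u32 value = readl(reg);

	if (en)
		value |= FEAT_EN;
	else
		value &= ~FEAT_EN;
	writel(value, reg);

	/* A core lacking the feature keeps the bit read-only at zero. */
	if (en && !(readl(reg) & FEAT_EN))
		return -EIO;

	return 0;
}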
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 589931795847..8391ca63d943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -22,6 +22,7 @@
#define DMA_DEBUG_STATUS_1 0x00001010
#define DMA_DEBUG_STATUS_2 0x00001014
#define DMA_AXI_BUS_MODE 0x00001028
+#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_SFT_RESET BIT(0)
@@ -82,6 +83,11 @@
#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+/* DMA TBS Control */
+#define DMA_TBS_FTOS GENMASK(31, 8)
+#define DMA_TBS_FTOV BIT(0)
+#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
+
/* Following DMA defines are channel oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
@@ -114,6 +120,7 @@
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_EDSE BIT(28)
#define DMA_CONTROL_TSE BIT(12)
#define DMA_CONTROL_OSP BIT(4)
#define DMA_CONTROL_ST BIT(0)
@@ -168,6 +175,8 @@
/* DMA default interrupt mask for 4.00 */
#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
DMA_CHAN_INTR_ENA_RIE | \
@@ -178,6 +187,8 @@
/* DMA default interrupt mask for 4.10a */
#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
/* channel 0 specific fields */
#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
@@ -186,9 +197,10 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index f2a29a90e085..9becca280074 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -97,21 +97,52 @@ void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
- DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e436fa160c7d..494c859b4ade 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -550,3 +550,122 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
+
+static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + MTL_EST_GCL_DATA);
+
+ ctrl = (reg << ADDR_SHIFT);
+ ctrl |= gcl ? 0 : GCRR;
+
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ ctrl |= SRWO;
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & SRWO), 100, 5000);
+}
+
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ u32 speed, total_offset, offset, ctrl, ctr_low;
+ u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
+ u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
+ int i, ret = 0x0;
+ u64 total_ctr;
+
+ if (extcfg & GMAC_CONFIG_EIPG_EN) {
+ offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
+ offset = 104 + (offset * 8);
+ } else {
+ offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
+ offset = 96 - (offset * 8);
+ }
+
+ speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+ speed = speed >> GMAC_CONFIG_FES_SHIFT;
+
+ switch (speed) {
+ case 0x0:
+ offset = offset * 1000; /* 1G */
+ break;
+ case 0x1:
+ offset = offset * 400; /* 2.5G */
+ break;
+ case 0x2:
+ offset = offset * 100000; /* 10M */
+ break;
+ case 0x3:
+ offset = offset * 10000; /* 100M */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ offset = offset / 1000;
+
+ ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
+ ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
+ ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
+ ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ if (ret)
+ return ret;
+
+ total_offset = 0;
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ if (ret)
+ return ret;
+
+ total_offset += offset;
+ }
+
+ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
+ total_ctr += total_offset;
+
+ ctr_low = do_div(total_ctr, 1000000000);
+
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
+ if (ret)
+ return ret;
+
+ ctrl = readl(ioaddr + MTL_EST_CONTROL);
+ ctrl &= ~PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= EEST | SSWL;
+ else
+ ctrl &= ~EEST;
+
+ writel(ctrl, ioaddr + MTL_EST_CONTROL);
+ return 0;
+}
+
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value &= ~EFPE;
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+ value &= ~GMAC_RXQCTRL_FPRQ;
+ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+ value |= EFPE;
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
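The offset arithmetic in dwmac5_est_configure() above reads most naturally as a unit conversion: the IPG taken from the MAC registers is in bit times, and the switch multiplies by the bit duration at the negotiated speed while staying in integer picoseconds until the final division down to nanoseconds (1 Gb/s is 1000 ps/bit, 2.5 Gb/s is 400, 100 Mb/s is 10000, 10 Mb/s is 100000). A short sketch of the same conversion, with a worked value:

#include <linux/types.h>

/* Hedged re-derivation of the switch above; ps_per_bit is the bit
 * duration at the current link speed, in picoseconds. */
static u32 ipg_offset_ns(u32 ipg_bits, u32 ps_per_bit)
{
	return ipg_bits * ps_per_bit / 1000;	/* ps -> ns */
}

With the default 96-bit IPG on a 1 Gb/s link, ipg_offset_ns(96, 1000) is 96 ns, so each GCL interval is stretched by 96 ns and the cycle time grows by 96 ns times gcl_size; that accumulated total_offset is what the do_div() block folds back into the CTR_LOW/CTR_HIGH split of seconds and nanoseconds.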
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 23fecf68f781..3e8faa96b4d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,6 +11,9 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
+#define MAC_FPE_CTRL_STS 0x00000234
+#define EFPE BIT(0)
+
#define MAC_PPS_CONTROL 0x00000b70
#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x) ((x) * 8)
@@ -30,6 +33,23 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_EST_CONTROL 0x00000c50
+#define PTOV GENMASK(31, 24)
+#define PTOV_SHIFT 24
+#define SSWL BIT(1)
+#define EEST BIT(0)
+#define MTL_EST_GCL_CONTROL 0x00000c80
+#define BTR_LOW 0x0
+#define BTR_HIGH 0x1
+#define CTR_LOW 0x2
+#define CTR_HIGH 0x3
+#define TER 0x4
+#define LLR 0x5
+#define ADDR_SHIFT 8
+#define GCRR BIT(2)
+#define SRWO BIT(0)
+#define MTL_EST_GCL_DATA 0x00000c84
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -83,5 +103,9 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 292b880f3f9f..e5dbd0bc257e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -96,6 +96,8 @@
/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
/* DMA Status register defines */
#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
@@ -130,8 +132,8 @@
#define NUM_DWMAC1000_DMA_REGS 23
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 1bc25aa86dbd..688d36095333 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -37,14 +37,28 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value |= DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value &= ~DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index ef8a07c68ca7..6c3b8a950f58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -73,6 +73,9 @@
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL1 0x000000a4
+#define XGMAC_RQ GENMASK(7, 4)
+#define XGMAC_RQ_SHIFT 4
#define XGMAC_RXQ_CTRL2 0x000000a8
#define XGMAC_RXQ_CTRL3 0x000000ac
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
@@ -136,6 +139,11 @@
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_TBSSEL BIT(27)
+#define XGMAC_HWFEAT_FPESEL BIT(26)
+#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
+#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
+#define XGMAC_HWFEAT_ESTSEL BIT(19)
#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
#define XGMAC_HWFEAT_DVLAN BIT(13)
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
@@ -148,6 +156,8 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_FPE_CTRL_STS 0x00000280
+#define XGMAC_EFPE BIT(0)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
@@ -237,6 +247,22 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_EST_CONTROL 0x00001050
+#define XGMAC_PTOV GENMASK(31, 23)
+#define XGMAC_PTOV_SHIFT 23
+#define XGMAC_SSWL BIT(1)
+#define XGMAC_EEST BIT(0)
+#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
+#define XGMAC_BTR_LOW 0x0
+#define XGMAC_BTR_HIGH 0x1
+#define XGMAC_CTR_LOW 0x2
+#define XGMAC_CTR_HIGH 0x3
+#define XGMAC_TER 0x4
+#define XGMAC_LLR 0x5
+#define XGMAC_ADDR_SHIFT 8
+#define XGMAC_GCRR BIT(2)
+#define XGMAC_SRWO BIT(0)
+#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
@@ -321,6 +347,13 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_TBS_CTRL0 0x00003054
+#define XGMAC_DMA_TBS_CTRL1 0x00003058
+#define XGMAC_DMA_TBS_CTRL2 0x0000305c
+#define XGMAC_DMA_TBS_CTRL3 0x00003060
+#define XGMAC_FTOS GENMASK(31, 8)
+#define XGMAC_FTOV BIT(0)
+#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV)
#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
#define XGMAC_MCSIS BIT(31)
#define XGMAC_MSUIS BIT(29)
@@ -335,6 +368,7 @@
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
@@ -363,6 +397,8 @@
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
@@ -377,6 +413,9 @@
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES0_LTV BIT(31)
+#define XGMAC_TDES0_LT GENMASK(7, 0)
+#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
@@ -395,6 +434,7 @@
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
#define XGMAC_TDES3_SAIC_SHIFT 23
+#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 082f5ee9e525..2af3ac5409b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1359,6 +1359,81 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
+
+ ctrl = (reg << XGMAC_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : XGMAC_GCRR;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ ctrl |= XGMAC_SRWO;
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
+}
+
+static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ int i, ret = 0x0;
+ u32 ctrl;
+
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
+ ctrl &= ~XGMAC_PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= XGMAC_EEST | XGMAC_SSWL;
+ else
+ ctrl &= ~XGMAC_EEST;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
+ return 0;
+}
+
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+ u32 num_rxq, bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+
+ value &= ~XGMAC_EFPE;
+
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL1);
+ value &= ~XGMAC_RQ;
+ value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
+ writel(value, ioaddr + XGMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+ value |= XGMAC_EFPE;
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -1402,6 +1477,8 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index bd5838ce1e8a..c3d654cfa9ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -339,6 +339,14 @@ static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
}
+static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
+ p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -368,4 +376,5 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_sarc = dwxgmac2_set_sarc,
.set_vlan_tag = dwxgmac2_set_vlan_tag,
.set_vlan = dwxgmac2_set_vlan,
+ .set_tbs = dwxgmac2_set_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index f3f08ccc379b..77308c5c5d29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -248,14 +248,30 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value |= XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -413,6 +429,11 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
+ dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
+ dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
@@ -503,6 +524,28 @@ static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}
+static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_EDSE;
+ else
+ value &= ~XGMAC_EDSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
+ return 0;
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -530,4 +573,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
.enable_sph = dwxgmac2_enable_sph,
+ .enable_tbs = dwxgmac2_enable_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index aa5b917398fe..df63b0367aff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -29,6 +29,7 @@ struct stmmac_extra_stats;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
+struct dma_edesc;
/* Descriptors helpers */
struct stmmac_desc_ops {
@@ -95,6 +96,7 @@ struct stmmac_desc_ops {
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
u32 inner_type);
void (*set_vlan)(struct dma_desc *p, u32 type);
+ void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -157,6 +159,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_vlan, __args)
+#define stmmac_set_desc_tbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tbs, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -187,8 +191,10 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
- void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
void (*start_tx)(void __iomem *ioaddr, u32 chan);
void (*stop_tx)(void __iomem *ioaddr, u32 chan);
void (*start_rx)(void __iomem *ioaddr, u32 chan);
@@ -208,6 +214,7 @@ struct stmmac_dma_ops {
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
+ int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -266,6 +273,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
#define stmmac_enable_sph(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_sph, __args)
+#define stmmac_enable_tbs(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, enable_tbs, __args)
struct mac_device_info;
struct net_device;
@@ -274,6 +283,7 @@ struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
+struct stmmac_est;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -371,6 +381,10 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
+ int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
};
#define stmmac_core_init(__priv, __args...) \
@@ -457,6 +471,10 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, est_configure, __args)
+#define stmmac_fpe_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -514,6 +532,8 @@ struct stmmac_priv;
struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
+struct tc_taprio_qopt_offload;
+struct tc_etf_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -523,6 +543,10 @@ struct stmmac_tc_ops {
struct tc_cbs_qopt_offload *qopt);
int (*setup_cls)(struct stmmac_priv *priv,
struct flow_cls_offload *cls);
+ int (*setup_taprio)(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt);
+ int (*setup_etf)(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -533,6 +557,10 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_cbs, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls, __args)
+#define stmmac_tc_setup_taprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_taprio, __args)
+#define stmmac_tc_setup_etf(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_etf, __args)
struct stmmac_counters;
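The new stmmac_set_desc_tbs/stmmac_enable_tbs/stmmac_est_configure wrappers above reuse hwif.h's existing dispatch macros, whose definitions are outside this hunk. Judging from the call sites, they plausibly expand to a NULL-guarded indirect call through the per-core ops table, roughly like the following reconstruction (an assumption for illustration, not the file's literal text):

/* Hedged reconstruction of the hwif.h dispatch helper; the real
 * definition is not shown in this diff. */
#define stmmac_do_callback(__priv, __module, __cname, __args...)	\
({									\
	int __result = -EINVAL;						\
									\
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname)	\
		__result = (__priv)->hw->__module->__cname(__args);	\
									\
	__result;							\
})

Under that reading, a core opts in to EST, FPE or TBS simply by filling the corresponding function pointer, as the dwmac4_core.c and dwxgmac2_core.c hunks above do, while every other core keeps returning -EINVAL.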
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 252cf48c5816..a57b0fa815ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,13 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+#define MMC_TX_FPE_FRAG 0x1a8
+#define MMC_TX_HOLD_REQ 0x1ac
+#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8
+#define MMC_RX_PKT_SMD_ERR 0x1cc
+#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0
+#define MMC_RX_FPE_FRAG 0x1d4
+
/* XGMAC MMC Registers */
#define MMC_XGMAC_TX_OCTET_GB 0x14
#define MMC_XGMAC_TX_PKT_GB 0x1c
@@ -315,6 +322,15 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG);
}
const struct stmmac_mmc_ops dwmac_mmc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d993fc7e82c3..9c02fc754bf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -39,13 +39,18 @@ struct stmmac_tx_info {
bool is_jumbo;
};
+#define STMMAC_TBS_AVAIL BIT(0)
+#define STMMAC_TBS_EN BIT(1)
+
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
u32 tx_count_frames;
+ int tbs;
struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_edesc *dma_entx;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
struct stmmac_tx_info *tx_skbuff_dma;
@@ -88,6 +93,7 @@ struct stmmac_channel {
struct napi_struct rx_napi ____cacheline_aligned_in_smp;
struct napi_struct tx_napi ____cacheline_aligned_in_smp;
struct stmmac_priv *priv_data;
+ spinlock_t lock;
u32 index;
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 80d59b775907..ff1cbfc834b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -388,9 +388,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Using PCS we cannot deal with the phy registers at this stage
* so we do not support extra features like EEE.
*/
- if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
- (priv->hw->pcs == STMMAC_PCS_TBI) ||
- (priv->hw->pcs == STMMAC_PCS_RTBI))
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
return false;
/* Check if MAC core supports the EEE feature. */
@@ -1090,6 +1089,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
if (priv->extend_desc)
head_tx = (void *)tx_q->dma_etx;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ head_tx = (void *)tx_q->dma_entx;
else
head_tx = (void *)tx_q->dma_tx;
@@ -1163,13 +1164,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ int last = (i == (DMA_TX_SIZE - 1));
+ struct dma_desc *p;
+
if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
}
/**
@@ -1383,7 +1390,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
- else
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}
@@ -1392,6 +1399,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
else
p = tx_q->dma_tx + i;
@@ -1511,19 +1520,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
/* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue);
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- tx_q->dma_tx, tx_q->dma_tx_phy);
- else
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- tx_q->dma_etx, tx_q->dma_tx_phy);
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= DMA_TX_SIZE;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
@@ -1616,6 +1632,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
@@ -1632,28 +1650,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->tx_skbuff)
goto err_dma;
- if (priv->extend_desc) {
- tx_q->dma_etx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_etx)
- goto err_dma;
- } else {
- tx_q->dma_tx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_tx)
- goto err_dma;
- }
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= DMA_TX_SIZE;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ goto err_dma;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
}
return 0;
err_dma:
free_dma_tx_desc_resources(priv);
-
return ret;
}
@@ -1885,6 +1907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
else
p = tx_q->dma_tx + entry;
@@ -1966,7 +1990,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -1983,19 +2007,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int i;
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan);
- for (i = 0; i < DMA_TX_SIZE; i++)
- if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
- else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ stmmac_clear_tx_descriptors(priv, chan);
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
@@ -2060,17 +2077,25 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
if (napi_schedule_prep(&ch->rx_napi)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule_irqoff(&ch->rx_napi);
- status |= handle_tx;
}
}
- if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
- napi_schedule_irqoff(&ch->tx_napi);
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(&ch->tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule_irqoff(&ch->tx_napi);
+ }
+ }
return status;
}
@@ -2265,14 +2290,14 @@ static void stmmac_tx_timer(struct timer_list *t)
ch = &priv->channel[tx_q->queue_index];
- /*
- * If NAPI is already running we can miss some events. Let's rearm
- * the timer and try again.
- */
- if (likely(napi_schedule_prep(&ch->tx_napi)))
+ if (likely(napi_schedule_prep(&ch->tx_napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->tx_napi);
- else
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ }
}
/**
@@ -2624,6 +2649,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->dma_cap.vlins)
stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+ /* TBS */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2653,8 +2686,7 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
@@ -2681,6 +2713,16 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+ tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+ }
+
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
@@ -2829,7 +2871,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
tag = skb_vlan_tag_get(skb);
- p = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
return false;
@@ -2864,7 +2910,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
- desc = tx_q->dma_tx + tx_q->cur_tx;
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
if (priv->dma_cap.addr64 <= 32)
@@ -2915,13 +2965,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
+ int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- u8 proto_hdr_len, hdr;
bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -2958,7 +3008,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
@@ -2978,7 +3032,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
- desc = tx_q->dma_tx + first_entry;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
first = desc;
if (has_vlan)
@@ -3050,7 +3107,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
set_ic = false;
if (set_ic) {
- desc = &tx_q->dma_tx[tx_q->cur_tx];
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
@@ -3113,16 +3174,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);
-
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
-
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -3152,10 +3215,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ int entry, desc_size, first_tx;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
- int entry, first_tx;
dma_addr_t des;
tx_q = &priv->tx_queue[queue];
@@ -3195,6 +3259,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3224,6 +3290,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3270,6 +3338,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = &tx_q->dma_tx[entry];
@@ -3287,20 +3357,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
- void *tx_head;
-
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
- if (priv->extend_desc)
- tx_head = (void *)tx_q->dma_etx;
- else
- tx_head = (void *)tx_q->dma_tx;
-
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
-
netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
@@ -3346,12 +3407,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
- csum_insertion, priv->mode, 1, last_segment,
+ csum_insertion, priv->mode, 0, last_segment,
skb->len);
- } else {
- stmmac_set_tx_owner(priv, first);
}
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
/* The own bit must be the latest setting done when preparing the
* descriptor, and then a barrier is needed to make sure that
* all is coherent before granting the DMA engine.
@@ -3362,7 +3430,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -3751,8 +3826,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
work_done = stmmac_rx(priv, budget, chan);
- if (work_done < budget && napi_complete_done(napi, work_done))
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
return work_done;
}
@@ -3761,7 +3842,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
int work_done;
@@ -3770,15 +3850,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
work_done = min(work_done, budget);
- if (work_done < budget)
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
- /* Force transmission restart */
- tx_q = &priv->tx_queue[chan];
- if (tx_q->cur_tx != tx_q->dirty_tx) {
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
- chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
}
return work_done;
@@ -3792,7 +3869,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void stmmac_tx_timeout(struct net_device *dev)
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4078,6 +4155,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
priv, priv, true);
case TC_SETUP_QDISC_CBS:
return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4181,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
DMA_TX_SIZE, 1, seq);
- } else {
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
DMA_TX_SIZE, 0, seq);
@@ -4250,9 +4331,44 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
seq_printf(seq, "\tEnhanced descriptors: %s\n",
(priv->dma_cap.enh_desc) ? "Y" : "N");
-
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.addr64);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
@@ -4728,6 +4844,7 @@ int stmmac_dvr_probe(struct device *device,
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
+ spin_lock_init(&ch->lock);
ch->priv_data = priv;
ch->index = queue;
@@ -4757,8 +4874,7 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
@@ -4792,8 +4908,7 @@ int stmmac_dvr_probe(struct device *device,
error_netdev_register:
phylink_destroy(priv->phylink);
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -4838,8 +4953,7 @@ int stmmac_dvr_remove(struct device *dev)
reset_control_assert(priv->plat->stmmac_rst);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
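Taken together, the stmmac_main.c hunks replace the channel-wide interrupt on/off with per-direction masking: the ISR masks only the direction it schedules NAPI for, and each poll routine re-enables only its own direction on completion. Because enabling and disabling is now a read-modify-write on a register shared by both directions (see the dwmac4_lib.c hunks earlier), the new ch->lock serializes it; without it, an RX-poll unmask racing a TX-path mask could lose one of the two updates. A condensed sketch of the completion side, mirroring stmmac_napi_poll_rx() above (driver types assumed in scope, error handling elided):

static int rx_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	int work_done = stmmac_rx(priv, budget, ch->index);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Unmask RX only; the TX NAPI manages its own bit. */
		stmmac_enable_dma_irq(priv, priv->ioaddr, ch->index, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}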
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8237dbc3e991..623521052152 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -65,7 +65,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
- plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -154,8 +153,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->tx_queues_cfg[6].weight = 0x0F;
plat->tx_queues_cfg[7].weight = 0x10;
- plat->mdio_bus_data->phy_mask = 0;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
plat->dma_cfg->fixed_burst = 0;
@@ -386,8 +383,6 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tso_en = 1;
plat->pmt = 1;
- plat->mdio_bus_data->phy_mask = 0;
-
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -406,6 +401,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4775f49d7f3b..d10ac54bf385 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- rc = of_get_phy_mode(np, &plat->phy_interface);
- if (rc)
- return ERR_PTR(rc);
+ plat->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (plat->phy_interface < 0)
+ return ERR_PTR(plat->phy_interface);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 13227909287c..2aba2673d6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -14,6 +14,7 @@
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
@@ -50,6 +51,7 @@ struct stmmac_packet_attrs {
u8 id;
int sarc;
u16 queue_mapping;
+ u64 timestamp;
};
static u8 stmmac_test_next_id;
@@ -80,7 +82,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
if (attr->max_size && (attr->max_size > size))
size = attr->max_size;
- skb = netdev_alloc_skb_ip_align(priv->dev, size);
+ skb = netdev_alloc_skb(priv->dev, size);
if (!skb)
return NULL;
@@ -208,6 +210,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
skb->pkt_type = PACKET_HOST;
skb->dev = priv->dev;
+ if (attr->timestamp)
+ skb->tstamp = ns_to_ktime(attr->timestamp);
+
return skb;
}
@@ -244,6 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ unsigned char *src = tpriv->packet->src;
+ unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -260,15 +267,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
goto out;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (tpriv->packet->dst) {
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (dst) {
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
goto out;
}
if (tpriv->packet->sarc) {
- if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
goto out;
- } else if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+ } else if (src) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, src))
goto out;
}
@@ -339,8 +346,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
goto cleanup;
}
- skb_set_queue_mapping(skb, attr->queue_mapping);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
if (ret)
goto cleanup;
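The switch from dev_queue_xmit() to dev_direct_xmit() in these selftests bypasses the qdisc layer: the frame is handed to the driver on exactly the requested hardware queue instead of whatever queue the stack's selection logic picks. A minimal sketch of the call pattern (the helper name is illustrative):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Transmit a prebuilt skb on a specific TX queue, skipping qdiscs. */
    static int xmit_on_queue(struct sk_buff *skb, u16 queue)
    {
    	/* dev_direct_xmit() consumes the skb and returns a NET_XMIT_*
    	 * code or a negative errno on failure.
    	 */
    	return dev_direct_xmit(skb, queue);
    }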
@@ -714,7 +720,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
struct ethhdr *ehdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
goto out;
if (ehdr->h_proto != htons(ETH_P_PAUSE))
goto out;
@@ -851,12 +857,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb,
if (tpriv->vlan_id) {
if (skb->vlan_proto != htons(proto))
goto out;
- if (skb->vlan_tci != tpriv->vlan_id)
+ if (skb->vlan_tci != tpriv->vlan_id) {
+ /* Means filter did not work. */
+ tpriv->ok = false;
+ complete(&tpriv->comp);
goto out;
+ }
}
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
goto out;
ihdr = ip_hdr(skb);
@@ -926,8 +936,7 @@ static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -965,6 +974,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_vlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1018,8 +1030,7 @@ static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1057,6 +1068,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_dvlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1286,8 +1300,7 @@ static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
skb->protocol = htons(proto);
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1323,16 +1336,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1399,7 +1415,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
@@ -1444,16 +1461,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1525,7 +1545,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
@@ -1578,7 +1599,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb,
struct arphdr *ahdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
goto out;
ahdr = arp_hdr(skb);
@@ -1639,8 +1660,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
if (ret)
goto cleanup;
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto cleanup_promisc;
@@ -1728,6 +1748,68 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
return 0;
}
+static int stmmac_test_tbs(struct stmmac_priv *priv)
+{
+#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms */
+ struct stmmac_packet_attrs attr = { };
+ struct tc_etf_qopt_offload qopt;
+ u64 start_time, curr_time = 0;
+ unsigned long flags;
+ int ret, i;
+
+ if (!priv->hwts_tx_en)
+ return -EOPNOTSUPP;
+
+ /* Find first TBS enabled Queue, if any */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ break;
+
+ if (i >= priv->plat->tx_queues_to_use)
+ return -EOPNOTSUPP;
+
+ qopt.enable = true;
+ qopt.queue = i;
+
+ ret = stmmac_tc_setup_etf(priv, priv, &qopt);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if (!curr_time) {
+ ret = -EOPNOTSUPP;
+ goto fail_disable;
+ }
+
+ start_time = curr_time;
+ curr_time += STMMAC_TBS_LT_OFFSET;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.timestamp = curr_time;
+ attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
+ attr.queue_mapping = i;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto fail_disable;
+
+ /* Check if expected time has elapsed */
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
+ ret = -EINVAL;
+
+fail_disable:
+ qopt.enable = false;
+ stmmac_tc_setup_etf(priv, priv, &qopt);
+ return ret;
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
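The test relies on the per-queue tbs field carrying two separate bits: one saying the DMA channel supports TBS at all, one saying the ETF offload has switched it on. Assuming the flag definitions from the driver header (BIT(0)/BIT(1), not shown in this patch), the distinction looks like:

    #include <linux/bits.h>

    #define STMMAC_TBS_AVAIL	BIT(0)	/* channel supports TBS (set at probe) */
    #define STMMAC_TBS_EN	BIT(1)	/* ETF offload enabled it (tc) */

    static bool tbs_usable(u32 tbs)
    {
    	/* available is a hardware property; enabled is policy */
    	return (tbs & STMMAC_TBS_AVAIL) && (tbs & STMMAC_TBS_EN);
    }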
@@ -1861,6 +1943,10 @@ static const struct stmmac_test {
.name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
+ }, {
+ .name = "TBS (ETF Scheduler) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_tbs,
},
};
@@ -1869,7 +1955,6 @@ void stmmac_selftest_run(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
int count = stmmac_selftest_get_count(priv);
- int carrier = netif_carrier_ok(dev);
int i, ret;
memset(buf, 0, sizeof(*buf) * count);
@@ -1879,15 +1964,12 @@ void stmmac_selftest_run(struct net_device *dev,
netdev_err(priv->dev, "Only offline tests are supported\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
- } else if (!carrier) {
+ } else if (!netif_carrier_ok(dev)) {
netdev_err(priv->dev, "You need valid Link to execute tests\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
- /* We don't want extra traffic */
- netif_carrier_off(dev);
-
/* Wait for queues drain */
msleep(200);
@@ -1942,10 +2024,6 @@ void stmmac_selftest_run(struct net_device *dev,
break;
}
}
-
- /* Restart everything */
- if (carrier)
- netif_carrier_on(dev);
}
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7d972e0fd2b0..7a01dee2f9a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
{
int ret = 0;
+ /* When RSS is enabled, the filtering will be bypassed */
+ if (priv->rss.enable)
+ return -EBUSY;
+
switch (cls->command) {
case FLOW_CLS_REPLACE:
ret = tc_add_flow(priv, cls);
@@ -591,9 +595,167 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ struct timespec64 time;
+ bool fpe = false;
+ int i, ret = 0;
+ u64 ctr;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ switch (wid) {
+ case 0x1:
+ wid = 16;
+ break;
+ case 0x2:
+ wid = 20;
+ break;
+ case 0x3:
+ wid = 24;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (dep) {
+ case 0x1:
+ dep = 64;
+ break;
+ case 0x2:
+ dep = 128;
+ break;
+ case 0x3:
+ dep = 256;
+ break;
+ case 0x4:
+ dep = 512;
+ break;
+ case 0x5:
+ dep = 1024;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!qopt->enable)
+ goto disable;
+ if (qopt->num_entries >= dep)
+ return -EINVAL;
+ if (!qopt->base_time)
+ return -ERANGE;
+ if (!qopt->cycle_time)
+ return -ERANGE;
+
+ if (!plat->est) {
+ plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ GFP_KERNEL);
+ if (!plat->est)
+ return -ENOMEM;
+ } else {
+ memset(plat->est, 0, sizeof(*plat->est));
+ }
+
+ size = qopt->num_entries;
+
+ priv->plat->est->gcl_size = size;
+ priv->plat->est->enable = qopt->enable;
+
+ for (i = 0; i < size; i++) {
+ s64 delta_ns = qopt->entries[i].interval;
+ u32 gates = qopt->entries[i].gate_mask;
+
+ if (delta_ns > GENMASK(wid, 0))
+ return -ERANGE;
+ if (gates > GENMASK(31 - wid, 0))
+ return -ERANGE;
+
+ switch (qopt->entries[i].command) {
+ case TC_TAPRIO_CMD_SET_GATES:
+ if (fpe)
+ return -EINVAL;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_HOLD:
+ gates |= BIT(0);
+ fpe = true;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_RELEASE:
+ gates &= ~BIT(0);
+ fpe = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ }
+
+ /* Adjust for real system time */
+ time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+
+ ctr = qopt->cycle_time;
+ priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->plat->est->ctr[1] = (u32)ctr;
+
+ if (fpe && !priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ ret = stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, fpe);
+ if (ret && fpe) {
+ netdev_err(priv->dev, "failed to enable Frame Preemption\n");
+ return ret;
+ }
+
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ if (ret) {
+ netdev_err(priv->dev, "failed to configure EST\n");
+ goto disable;
+ }
+
+ netdev_info(priv->dev, "configured EST\n");
+ return 0;
+
+disable:
+ if (priv->plat->est) {
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ }
+ return ret;
+}
+
+static int tc_setup_etf(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt)
+{
+ if (!priv->dma_cap.tbssel)
+ return -EOPNOTSUPP;
+ if (qopt->queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+ if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ return -EINVAL;
+
+ if (qopt->enable)
+ priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ else
+ priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+
+ netdev_info(priv->dev, "%s ETF for Queue %d\n",
+ qopt->enable ? "enabled" : "disabled", qopt->queue);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio,
+ .setup_etf = tc_setup_etf,
};
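Each gate control list word built in tc_setup_taprio() packs the interval into the low wid bits and the gate mask into the remaining high bits, which is why both range checks against GENMASK() precede the store. A worked example, assuming estwid decodes to wid = 24:

    /* gcl word layout for wid = 24: [31:24] gate mask, [23:0] interval */
    static u32 pack_gcl_entry(u32 interval_ns, u32 gate_mask, u32 wid)
    {
    	return interval_ns | (gate_mask << wid);
    }

    /* Open gates for queues 0 and 2 for 100 us:
     *	pack_gcl_entry(100000, BIT(0) | BIT(2), 24) == 0x050186a0
     */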
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index c91876f8c536..6ec9163e232c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2666,7 +2666,7 @@ static void cas_netpoll(struct net_device *dev)
}
#endif
-static void cas_tx_timeout(struct net_device *dev)
+static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cas *cp = netdev_priv(dev);
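This hunk and the many like it below are part of a tree-wide change: ndo_tx_timeout now receives the index of the queue whose watchdog fired. Single-queue drivers such as these simply ignore the argument; a multiqueue driver could use it to recover only the stuck queue. A sketch of the new hook shape (names illustrative):

    static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
    	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

    	netdev_warn(dev, "TX queue %u timed out\n", txqueue);
    	/* driver-specific reset of that queue's ring would go here */
    	netif_tx_wake_queue(txq);
    }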
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f5fd1f3c07cc..9a5004f674c7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6517,7 +6517,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_tx_timeout(struct net_device *dev)
+static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct niu *np = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index e9b757b03b56..c5add0b45eed 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -941,7 +941,7 @@ static int bigmac_close(struct net_device *dev)
return 0;
}
-static void bigmac_tx_timeout(struct net_device *dev)
+static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bigmac *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3e7631160384..8358064fbd48 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -970,7 +970,7 @@ static void gem_poll_controller(struct net_device *dev)
}
#endif
-static void gem_tx_timeout(struct net_device *dev)
+static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index d007dfeba5c3..f0fe7bb2a750 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2246,7 +2246,7 @@ static int happy_meal_close(struct net_device *dev)
#define SXD(x)
#endif
-static void happy_meal_tx_timeout(struct net_device *dev)
+static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1468fa0a54e9..2102b95ec347 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -544,7 +544,7 @@ static void qe_tx_reclaim(struct sunqe *qep)
qep->tx_old = elem;
}
-static void qe_tx_timeout(struct net_device *dev)
+static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sunqe *qep = netdev_priv(dev);
int tx_full;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8b94d9ad9e2b..c23ce838ff63 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1223,7 +1223,7 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
{
struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct sk_buff *segs;
+ struct sk_buff *segs, *curr, *next;
int maclen, datalen;
int status;
int gso_size, gso_type, gso_segs;
@@ -1282,11 +1282,8 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
skb_reset_mac_header(skb);
status = 0;
- while (segs) {
- struct sk_buff *curr = segs;
-
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
if (port->tso && curr->len > dev->mtu) {
skb_shinfo(curr)->gso_size = gso_size;
skb_shinfo(curr)->gso_type = gso_type;
@@ -1539,7 +1536,7 @@ out_dropped:
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
-void sunvnet_tx_timeout_common(struct net_device *dev)
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue)
{
/* XXX Implement me XXX */
}
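skb_list_walk_safe(), adopted above, iterates a GSO segment list while caching the next pointer, so each segment can be unlinked and handed off (or freed) inside the loop body. In miniature:

    #include <linux/skbuff.h>

    static void consume_segments(struct sk_buff *segs)
    {
    	struct sk_buff *curr, *next;

    	skb_list_walk_safe(segs, curr, next) {
    		skb_mark_not_on_list(curr);	/* detach from the list */
    		consume_skb(curr);		/* stand-in for transmit */
    	}
    }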
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index 2b808d2482d6..5416a3cb9e7d 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -135,7 +135,7 @@ int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
-void sunvnet_tx_timeout_common(struct net_device *dev);
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue);
netdev_tx_t
sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
struct vnet_port *(*vnet_tx_port)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index a1f5a1e61040..07046a2370b3 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -689,7 +689,7 @@ static int xlgmac_close(struct net_device *netdev)
return 0;
}
-static void xlgmac_tx_timeout(struct net_device *netdev)
+static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xlgmac_pdata *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 3a655a4dc10e..a530afe3ce12 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -797,7 +797,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void cpmac_tx_timeout(struct net_device *dev)
+static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -816,16 +816,6 @@ static void cpmac_tx_timeout(struct net_device *dev)
netif_tx_wake_all_queues(priv->dev);
}
-static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!(netif_running(dev)))
- return -EINVAL;
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void cpmac_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -1054,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 707d5eb480ce..97a058ca60ac 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -272,7 +272,7 @@ void soft_reset(const char *module, void __iomem *reg)
WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-void cpsw_ndo_tx_timeout(struct net_device *ndev)
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index bc726356a72c..b8d7b924ee3d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -449,7 +449,7 @@ int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
void cpsw_rx_vlan_encap(struct sk_buff *skb);
void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
-void cpsw_ndo_tx_timeout(struct net_device *ndev);
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ae27be85e363..75d4e16c692b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -983,7 +983,7 @@ fail_tx:
* error and re-initialize the TX channel for hardware operation
*
*/
-static void emac_dev_tx_timeout(struct net_device *ndev)
+static void emac_dev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1b2702f74455..d7a144b4a09f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1811,7 +1811,7 @@ out:
return (ret == 0) ? 0 : err;
}
-static void netcp_ndo_tx_timeout(struct net_device *ndev)
+static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct netcp_intf *netcp = netdev_priv(ndev);
unsigned int descs = knav_pool_count(netcp->tx_pool);
@@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- efuse = devm_ioremap_nocache(dev, res.start, size);
+ efuse = devm_ioremap(dev, res.start, size);
if (!efuse) {
dev_err(dev, "could not map resource\n");
devm_release_mem_region(dev, res.start, size);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d6a192c1f337..fb36115e9c51 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2533,8 +2533,6 @@ static int gbe_del_vid(void *intf_priv, int vid)
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
-#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
static void gbe_txtstamp(void *context, struct sk_buff *skb)
{
@@ -2566,7 +2564,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
* We mark it here because skb_tx_timestamp() is called
* after all the txhooks are called.
*/
- if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
+ if (phy_has_txtstamp(phydev)) {
skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
return 0;
}
@@ -2588,7 +2586,7 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
if (p_info->rxtstamp_complete)
return 0;
- if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
+ if (phy_has_rxtstamp(phydev)) {
p_info->rxtstamp_complete = true;
return 0;
}
@@ -2830,7 +2828,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
struct gbe_intf *gbe_intf = intf_priv;
struct phy_device *phy = gbe_intf->slave->phy;
- if (!phy || !phy->drv->hwtstamp) {
+ if (!phy_has_hwtstamp(phy)) {
switch (cmd) {
case SIOCGHWTSTAMP:
return gbe_hwtstamp_get(gbe_intf, req);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 78f0f2d59e22..ad465202980a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -161,7 +161,7 @@ static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
int irq, int rev, const struct pci_device_id *ent);
-static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -997,7 +997,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
**************************************************************/
-static void tlan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
@@ -1028,7 +1028,7 @@ static void tlan_tx_timeout_work(struct work_struct *work)
struct tlan_priv *priv =
container_of(work, struct tlan_priv, tlan_tqueue);
- tlan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev, UINT_MAX);
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d9f8acb7ee3..070dd6fa9401 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1405,7 +1405,7 @@ out:
*
* called, if tx hangs. Schedules a task that resets the interface
*/
-void gelic_net_tx_timeout(struct net_device *netdev)
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 051033580f0a..805903dbddcc 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,7 +359,7 @@ int gelic_net_open(struct net_device *netdev);
int gelic_net_stop(struct net_device *netdev);
netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
-void gelic_net_tx_timeout(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 538e70810d3d..6576271642c1 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2180,7 +2180,7 @@ out:
* called, if tx hangs. Schedules a task that resets the interface
*/
static void
-spider_net_tx_timeout(struct net_device *netdev)
+spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct spider_net_card *card;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 12466a72cefc..3fd43d30b20d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -483,8 +483,7 @@ static void tc35815_txdone(struct net_device *dev);
static int tc35815_close(struct net_device *dev);
static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
static void tc35815_set_multicast_list(struct net_device *dev);
-static void tc35815_tx_timeout(struct net_device *dev);
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev);
#endif
@@ -751,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_get_stats = tc35815_get_stats,
.ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
- .ndo_do_ioctl = tc35815_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1189,7 +1188,7 @@ static void tc35815_schedule_restart(struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
}
-static void tc35815_tx_timeout(struct net_device *dev)
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
@@ -2009,15 +2008,6 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
- if (!dev->phydev)
- return -ENODEV;
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static void tc35815_chip_reset(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ed12dbd156f0..803247d51fe9 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -506,7 +506,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
-static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
@@ -1761,7 +1761,7 @@ out_unlock:
mutex_unlock(&rp->task_lock);
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 346e44115c4e..4b556b74541a 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3257,12 +3257,16 @@ static struct platform_driver velocity_platform_driver = {
* @dev: network device
*
* Called before an ethtool operation. We need to make sure the
- * chip is out of D3 state before we poke at it.
+ * chip is out of D3 state before we poke at it. In case of ethtool
+ * ops nesting, only wake the device up in the outermost block.
*/
static int velocity_ethtool_up(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (vptr->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
+ if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D0);
return 0;
}
@@ -3272,12 +3276,14 @@ static int velocity_ethtool_up(struct net_device *dev)
* @dev: network device
*
* Called after an ethtool operation. Restore the chip back to D3
- * state if it isn't running.
+ * state if it isn't running. In case of ethtool ops nesting, only
+ * put the device to sleep in the outermost block.
*/
static void velocity_ethtool_down(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D3hot);
}
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index cdfe7809e3c1..f196e71d2c04 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1483,6 +1483,7 @@ struct velocity_info {
struct velocity_context context;
u32 ticks;
+ u32 ethtool_ops_nesting;
u8 rev_id;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index bede1ff289c5..c0d181a7f83a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -790,7 +790,7 @@ static void w5100_restart_work(struct work_struct *work)
w5100_restart(priv->ndev);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 6ba2747779ce..46aae30c4636 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -341,7 +341,7 @@ static void w5300_get_regs(struct net_device *ndev,
}
}
-static void w5300_tx_timeout(struct net_device *ndev)
+static void w5300_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5300_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 21c1b4322ea7..6f11f52c9a9e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1080,17 +1080,6 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
-static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, rq, cmd);
-}
-
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
@@ -1098,7 +1087,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = temac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1202,7 +1191,7 @@ static int temac_probe(struct platform_device *pdev)
/* map device registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
@@ -1296,7 +1285,7 @@ static int temac_probe(struct platform_device *pdev)
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0de52e70abcc..0c26f5bcc523 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -521,7 +521,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*
* This function is called when Tx time out occurs for Emaclite device.
*/
-static void xemaclite_tx_timeout(struct net_device *dev)
+static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index fd5288ff53b5..480ab7251515 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -288,7 +288,7 @@ struct local_info {
*/
static netdev_tx_t do_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void xirc_tx_timeout(struct net_device *dev);
+static void xirc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void xirc2ps_tx_timeout_task(struct work_struct *work);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -1203,7 +1203,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
}
static void
-xirc_tx_timeout(struct net_device *dev)
+xirc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cd0a8f46e7c6..98aa7b8ddb06 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -27,4 +27,18 @@ config IXP4XX_ETH
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
+config PTP_1588_CLOCK_IXP46X
+ tristate "Intel IXP46x as PTP clock"
+ depends on IXP4XX_ETH
+ depends on PTP_1588_CLOCK
+ default y
+ help
+ This driver adds support for using the IXP46X as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_ixp46x.
+
endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index 794a519d07b3..607f91b1e878 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -3,4 +3,5 @@
# Makefile for the Intel XScale IXP device drivers.
#
-obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h
new file mode 100644
index 000000000000..d792130e27b0
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp46x_ts.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP 1588 clock using the IXP46X
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+
+#ifndef _IXP46X_TS_H_
+#define _IXP46X_TS_H_
+
+#define DEFAULT_ADDEND 0xF0000029
+#define TICKS_NS_SHIFT 4
+
+struct ixp46x_channel_ctl {
+ u32 ch_control; /* 0x40 Time Synchronization Channel Control */
+ u32 ch_event; /* 0x44 Time Synchronization Channel Event */
+ u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */
+ u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */
+ u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */
+ u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */
+ u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */
+ u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */
+};
+
+struct ixp46x_ts_regs {
+ u32 control; /* 0x00 Time Sync Control Register */
+ u32 event; /* 0x04 Time Sync Event Register */
+ u32 addend; /* 0x08 Time Sync Addend Register */
+ u32 accum; /* 0x0C Time Sync Accumulator Register */
+ u32 test; /* 0x10 Time Sync Test Register */
+ u32 unused; /* 0x14 */
+ u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */
+ u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */
+ u32 systime_lo; /* 0x20 SystemTime_Low Register */
+ u32 systime_hi; /* 0x24 SystemTime_High Register */
+ u32 trgt_lo; /* 0x28 TargetTime_Low Register */
+ u32 trgt_hi; /* 0x2C TargetTime_High Register */
+ u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */
+ u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */
+ u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */
+ u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */
+
+ struct ixp46x_channel_ctl channel[3];
+};
+
+/* 0x00 Time Sync Control Register Bits */
+#define TSCR_AMM (1<<3)
+#define TSCR_ASM (1<<2)
+#define TSCR_TTM (1<<1)
+#define TSCR_RST (1<<0)
+
+/* 0x04 Time Sync Event Register Bits */
+#define TSER_SNM (1<<3)
+#define TSER_SNS (1<<2)
+#define TTIPEND (1<<1)
+
+/* 0x40 Time Synchronization Channel Control Register Bits */
+#define MASTER_MODE (1<<0)
+#define TIMESTAMP_ALL (1<<1)
+
+/* 0x44 Time Synchronization Channel Event Register Bits */
+#define TX_SNAPSHOT_LOCKED (1<<0)
+#define RX_SNAPSHOT_LOCKED (1<<1)
+
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
+#endif
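The lo/hi register pairs above hold 64-bit quantities in IXP46x ticks; TICKS_NS_SHIFT converts ticks to nanoseconds. A sketch of composing the raw system time, mirroring what the relocated ptp_ixp46x driver does (the register read ordering is an assumption about this hardware):

    #include <linux/io.h>

    static u64 ixp_systime_ns(struct ixp46x_ts_regs __iomem *regs)
    {
    	u64 ns;

    	ns = __raw_readl(&regs->systime_lo);
    	ns |= (u64)__raw_readl(&regs->systime_hi) << 32;
    	return ns << TICKS_NS_SHIFT;	/* ticks -> nanoseconds */
    }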
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 6fc04ffb22c2..269596c15133 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,14 +29,16 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <mach/ixp46x_ts.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include "ixp46x_ts.h"
+
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -517,25 +519,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
return ret;
}
-static int ixp4xx_mdio_register(void)
+static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
int err;
if (!(mdio_bus = mdiobus_alloc()))
return -ENOMEM;
- if (cpu_is_ixp43x()) {
- /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- } else {
- /* All MII PHY accesses use NPE-B Ethernet registers */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- }
-
+ mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
@@ -581,8 +572,8 @@ static void ixp4xx_adjust_link(struct net_device *dev)
__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
- printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
- dev->name, port->speed, port->duplex ? "full" : "half");
+ netdev_info(dev, "link up, speed %u Mb/s, %s duplex\n",
+ port->speed, port->duplex ? "full" : "half");
}
@@ -592,7 +583,7 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#if DEBUG_PKT_BYTES
int i;
- printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+ netdev_debug(dev, "%s(%i) ", func, len);
for (i = 0; i < len; i++) {
if (i >= DEBUG_PKT_BYTES)
break;
@@ -683,7 +674,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
int received = 0;
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+ netdev_debug(dev, "eth_poll\n");
#endif
while (received < budget) {
@@ -697,23 +688,20 @@ static int eth_poll(struct napi_struct *napi, int budget)
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
- dev->name);
+ netdev_debug(dev, "eth_poll napi_complete\n");
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
- dev->name);
+ netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
#endif
qmgr_disable_irq(rxq);
continue;
}
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll all done\n",
- dev->name);
+ netdev_debug(dev, "eth_poll all done\n");
#endif
return received; /* all work done */
}
@@ -778,7 +766,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
}
#if DEBUG_RX
- printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+ netdev_debug(dev, "eth_poll(): end, not all work done\n");
#endif
return received; /* not all work done */
}
@@ -842,7 +830,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct desc *desc;
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
+ netdev_debug(dev, "eth_xmit\n");
#endif
if (unlikely(skb->len > MAX_MRU)) {
@@ -897,22 +885,21 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
+ netdev_debug(dev, "eth_xmit queue full\n");
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
/* really empty in fact */
if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit ready again\n",
- dev->name);
+ netdev_debug(dev, "eth_xmit ready again\n");
#endif
netif_wake_queue(dev);
}
}
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
+ netdev_debug(dev, "eth_xmit end\n");
#endif
ixp_tx_timestamp(port, skb);
@@ -1099,7 +1086,7 @@ static int init_queues(struct port *port)
int i;
if (!ports_open) {
- dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
@@ -1186,8 +1173,7 @@ static int eth_open(struct net_device *dev)
return err;
if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
- printk(KERN_ERR "%s: %s not responding\n", dev->name,
- npe_name(npe));
+ netdev_err(dev, "%s not responding\n", npe_name(npe));
return -EIO;
}
port->firmware[0] = msg.byte4;
@@ -1299,7 +1285,7 @@ static int eth_close(struct net_device *dev)
msg.eth_id = port->id;
msg.byte3 = 1;
if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+ netdev_crit(dev, "unable to enable loopback\n");
i = 0;
do { /* drain RX buffers */
@@ -1323,11 +1309,11 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
- " left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
+ " left in NPE\n", buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+ netdev_debug(dev, "draining RX queue took %i cycles\n", i);
#endif
buffs = TX_DESCS;
@@ -1343,17 +1329,16 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
- "left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
+ "left in NPE\n", buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+ netdev_debug(dev, "draining TX queues took %i cycles\n", i);
#endif
msg.byte3 = 0;
if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to disable loopback\n",
- dev->name);
+ netdev_crit(dev, "unable to disable loopback\n");
phy_stop(dev->phydev);
@@ -1374,54 +1359,88 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int eth_init_one(struct platform_device *pdev)
+static int ixp4xx_eth_probe(struct platform_device *pdev)
{
- struct port *port;
- struct net_device *dev;
- struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
- struct phy_device *phydev = NULL;
- u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
+ struct phy_device *phydev = NULL;
+ struct device *dev = &pdev->dev;
+ struct eth_plat_info *plat;
+ resource_size_t regs_phys;
+ struct net_device *ndev;
+ struct resource *res;
+ struct port *port;
int err;
- if (!(dev = alloc_etherdev(sizeof(struct port))))
+ plat = dev_get_platdata(dev);
+
+ if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
- SET_NETDEV_DEV(dev, &pdev->dev);
- port = netdev_priv(dev);
- port->netdev = dev;
+ SET_NETDEV_DEV(ndev, dev);
+ port = netdev_priv(ndev);
+ port->netdev = ndev;
port->id = pdev->id;
+ /* Get the port resource and remap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ regs_phys = res->start;
+ port->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->regs))
+ return PTR_ERR(port->regs);
+
switch (port->id) {
case IXP4XX_ETH_NPEA:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
- regs_phys = IXP4XX_EthA_BASE_PHYS;
+ /* If the MDIO bus is not up yet, defer probe */
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEB:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- regs_phys = IXP4XX_EthB_BASE_PHYS;
+ /*
+ * On all except IXP43x, NPE-B is used for the MDIO bus.
+ * If there is no NPE-B in the feature set, bail out, else
+ * register the MDIO bus.
+ */
+ if (!cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEB_ETH0))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-B */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEC:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- regs_phys = IXP4XX_EthC_BASE_PHYS;
+ /*
+ * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access;
+ * if there is no NPE-C, no bus, nothing works, so bail out.
+ */
+ if (cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEC_ETH))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-C */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
default:
- err = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
- dev->netdev_ops = &ixp4xx_netdev_ops;
- dev->ethtool_ops = &ixp4xx_ethtool_ops;
- dev->tx_queue_len = 100;
+ ndev->netdev_ops = &ixp4xx_netdev_ops;
+ ndev->ethtool_ops = &ixp4xx_ethtool_ops;
+ ndev->tx_queue_len = 100;
- netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+ netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
- if (!(port->npe = npe_request(NPE_ID(port->id)))) {
- err = -EIO;
- goto err_free;
- }
+ if (!(port->npe = npe_request(NPE_ID(port->id))))
+ return -EIO;
- port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+ port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
if (!port->mem_res) {
err = -EBUSY;
goto err_npe_rel;
@@ -1429,9 +1448,9 @@ static int eth_init_one(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+ memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
- platform_set_drvdata(pdev, dev);
+ platform_set_drvdata(pdev, ndev);
__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
&port->regs->core_control);
@@ -1441,7 +1460,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
err = PTR_ERR(phydev);
@@ -1450,11 +1469,11 @@ static int eth_init_one(struct platform_device *pdev)
phydev->irq = PHY_POLL;
- if ((err = register_netdev(dev)))
+ if ((err = register_netdev(ndev)))
goto err_phy_dis;
- printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
- npe_name(port->npe));
+ netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
+ npe_name(port->npe));
return 0;
@@ -1465,58 +1484,32 @@ err_free_mem:
release_resource(port->mem_res);
err_npe_rel:
npe_release(port->npe);
-err_free:
- free_netdev(dev);
return err;
}
-static int eth_remove_one(struct platform_device *pdev)
+static int ixp4xx_eth_remove(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct phy_device *phydev = dev->phydev;
- struct port *port = netdev_priv(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = ndev->phydev;
+ struct port *port = netdev_priv(ndev);
- unregister_netdev(dev);
+ unregister_netdev(ndev);
phy_disconnect(phydev);
+ ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
- free_netdev(dev);
return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
- .probe = eth_init_one,
- .remove = eth_remove_one,
+ .probe = ixp4xx_eth_probe,
+ .remove = ixp4xx_eth_remove,
};
-
-static int __init eth_init_module(void)
-{
- int err;
-
- /*
- * FIXME: we bail out on device tree boot but this really needs
- * to be fixed in a nicer way: this registers the MDIO bus before
- * even matching the driver infrastructure, we should only probe
- * detected hardware.
- */
- if (of_have_populated_dt())
- return -ENODEV;
- if ((err = ixp4xx_mdio_register()))
- return err;
- return platform_driver_register(&ixp4xx_eth_driver);
-}
-
-static void __exit eth_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_eth_driver);
- ixp4xx_mdio_remove();
-}
+module_platform_driver(ixp4xx_eth_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
-module_init(eth_init_module);
-module_exit(eth_cleanup_module);
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 67028484e9a0..9ecc395239e9 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -15,7 +15,8 @@
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
-#include <mach/ixp46x_ts.h>
+
+#include "ixp46x_ts.h"
#define DRIVER "ptp_ixp46x"
#define N_EXT_TS 2
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 56b7791911bf..077c68498f04 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev)
/* Set up I/O base address. */
if (dfx_use_mmio) {
- bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
+ bp->base.mem = ioremap(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 060712c666bf..eaf85db53a5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev)
}
/* MMIO mapping setup. */
- mmio = ioremap_nocache(start, len);
+ mmio = ioremap(start, len);
if (!mmio) {
pr_err("%s: cannot map MMIO\n", fp->name);
ret = -ENOMEM;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 15754451cb87..69c29a2ef95d 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -1520,22 +1520,8 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
- int i, j;
- unsigned char s[255], sh[10];
- if (length > 64) {
- length = 64;
- }
printk(KERN_INFO "---Packet start---\n");
- for (i = 0, j = 0; i < length / 8; i++, j += 8)
- printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
- Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
- strcpy(s, "");
- for (i = 0; i < length % 8; i++) {
- sprintf(sh, "%02x ", Data[j + i]);
- strcat(s, sh);
- }
- printk(KERN_INFO "%s\n", s);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
printk(KERN_INFO "------------------\n");
} // dump_data
#else
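print_hex_dump() takes the log level, a per-line prefix string and prefix style, the row size and group size in bytes, the buffer and length, and whether to append an ASCII column; the one-liner above is equivalent to the removed hand-rolled loop. A small sketch with an offset prefix and the ASCII column enabled:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void demo_dump(const void *buf, size_t len)
    {
    	/* 16 bytes per row, 1-byte groups, cap at 64 bytes like above */
    	print_hex_dump(KERN_INFO, "pkt: ", DUMP_PREFIX_OFFSET,
    		       16, 1, buf, min_t(size_t, len, 64), true);
    }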
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 8a4fbfacad7e..065bb0a40b1d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw)
return NULL;
}
- base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+ base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);
return base;
}
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 91a1059517f5..8c810edece86 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -48,7 +48,7 @@ static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *);
+static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
@@ -795,7 +795,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev)
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
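The added txqueue parameter follows a tree-wide change to the .ndo_tx_timeout hook, which now tells drivers which queue stalled. The prototype the driver must match, paraphrased from struct net_device_ops:

void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);

fjes only ever uses queue 0 (note the hard-coded netdev_get_tx_queue(netdev, 0) above), so the new argument is accepted but unused.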
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index c611b6a80b20..9237b69d8e21 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command,
__field(u8, cs_busy)
__field(u8, cs_complete)
__field(int, timeout)
- __field(int, ret);
+ __field(int, ret)
),
TP_fast_assign(
__entry->cr_req = cr->bits.req_code;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index f6222ada6818..7032a2405e1a 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
return NULL;
}
- if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ sk = sock->sk;
+ if (sk->sk_protocol != IPPROTO_UDP ||
+ sk->sk_type != SOCK_DGRAM ||
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
- lock_sock(sock->sk);
- if (sock->sk->sk_user_data) {
+ lock_sock(sk);
+ if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_rel_sock;
}
- sk = sock->sk;
sock_hold(sk);
tuncfg.sk_user_data = gtp;
@@ -852,8 +854,7 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
}
}
@@ -861,10 +862,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
- if (sk1u)
- gtp_encap_disable_sock(sk1u);
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
}
}
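Dropping the if (sk0) and if (sk1u) guards is only safe if gtp_encap_disable_sock() tolerates a NULL socket. A hedged sketch of the contract the callers now rely on (the helper itself is outside this hunk):

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;
	/* ... detach encap state and drop the socket reference ... */
}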
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index df495b5595f5..e7413a643929 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -687,8 +687,6 @@ struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
struct hdlcdrv_state *s;
int err;
- BUG_ON(ops == NULL);
-
if (privsize < sizeof(struct hdlcdrv_state))
privsize = sizeof(struct hdlcdrv_state);
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
index 3a2aa0708166..0db7ccaec4a4 100644
--- a/drivers/net/hyperv/Makefile
+++ b/drivers/net/hyperv/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o netvsc_bpf.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dc44819946e6..abda736e7c7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -142,6 +142,8 @@ struct netvsc_device_info {
u32 send_section_size;
u32 recv_section_size;
+ struct bpf_prog *bprog;
+
u8 rss_key[NETVSC_HASH_KEYLEN];
};
@@ -189,7 +191,8 @@ int netvsc_send(struct net_device *net,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *page_buffer,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
@@ -198,6 +201,16 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp);
+unsigned int netvsc_xdp_fraglen(unsigned int len);
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev);
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev);
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info);
@@ -832,6 +845,8 @@ struct nvsp_message {
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
+#define NETVSC_XDP_HDRM 256
+
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
@@ -867,6 +882,7 @@ struct netvsc_stats {
u64 bytes;
u64 broadcast;
u64 multicast;
+ u64 xdp_drop;
struct u64_stats_sync syncp;
};
@@ -972,6 +988,9 @@ struct netvsc_channel {
atomic_t queue_sends;
struct nvsc_rsc rsc;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
struct netvsc_stats tx_stats;
struct netvsc_stats rx_stats;
};
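The bpf_prog pointer added to struct netvsc_channel is RCU-protected, so datapath readers must dereference it inside an RCU read-side critical section while control-path writers publish under RTNL. A minimal usage sketch (mirroring what netvsc_run_xdp() does in the new netvsc_bpf.c below):

struct bpf_prog *prog;
u32 act = XDP_PASS;

rcu_read_lock();
prog = rcu_dereference(nvchan->bpf_prog);
if (prog)
	act = bpf_prog_run_xdp(prog, xdp);	/* xdp: struct xdp_buff * */
rcu_read_unlock();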
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index eab83e71567a..ae3f3084c2ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -122,8 +122,10 @@ static void free_netvsc_device(struct rcu_head *head)
vfree(nvdev->send_buf);
kfree(nvdev->send_section_map);
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
vfree(nvdev->chan_table[i].mrc.slots);
+ }
kfree(nvdev);
}
@@ -900,7 +902,8 @@ int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool xdp_tx)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
@@ -923,10 +926,11 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
- /* Send control message directly without accessing msd (Multi-Send
- * Data) field which may be changed during data packet processing.
+ /* Send a control message or XDP packet directly without accessing
+ * msd (Multi-Send Data) field which may be changed during data packet
+ * processing.
*/
- if (!skb)
+ if (!skb || xdp_tx)
return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
@@ -1392,6 +1396,21 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
+
+ ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+
+ if (ret) {
+ netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+ goto cleanup2;
+ }
+
+ ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (ret) {
+ netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+ goto cleanup2;
+ }
}
/* Enable NAPI handler before init callbacks */
@@ -1437,6 +1456,8 @@ close:
cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
new file mode 100644
index 000000000000..20adfe544294
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/kernel.h>
+#include <net/xdp.h>
+
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+
+#include "hyperv_net.h"
+
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
+{
+ void *data = nvchan->rsc.data[0];
+ u32 len = nvchan->rsc.len[0];
+ struct page *page = NULL;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ xdp->data_hard_start = NULL;
+
+ rcu_read_lock();
+ prog = rcu_dereference(nvchan->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ /* allocate page buffer for data */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ act = XDP_DROP;
+ goto out;
+ }
+
+ xdp->data_hard_start = page_address(page);
+ xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &nvchan->xdp_rxq;
+ xdp->handle = 0;
+
+ memcpy(xdp->data, data, len);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ if (page && act != XDP_PASS && act != XDP_TX) {
+ __free_page(page);
+ xdp->data_hard_start = NULL;
+ }
+
+ return act;
+}
+
+unsigned int netvsc_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
+{
+ return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
+}
+
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev)
+{
+ struct bpf_prog *old_prog;
+ int buf_max, i;
+
+ old_prog = netvsc_xdp_get(nvdev);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
+ dev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && (dev->features & NETIF_F_LRO)) {
+ netdev_err(dev, "XDP: not support LRO\n");
+ NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog)
+ bpf_prog_add(prog, nvdev->num_chn);
+
+ for (i = 0; i < nvdev->num_chn; i++)
+ rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < nvdev->num_chn; i++)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
+{
+ struct netdev_bpf xdp;
+ bpf_op_t ndo_bpf;
+
+ ASSERT_RTNL();
+
+ if (!vf_netdev)
+ return 0;
+
+ ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
+ if (!ndo_bpf)
+ return 0;
+
+ memset(&xdp, 0, sizeof(xdp));
+
+ xdp.command = XDP_SETUP_PROG;
+ xdp.prog = prog;
+
+ return ndo_bpf(vf_netdev, &xdp);
+}
+
+static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
+{
+ struct bpf_prog *prog = netvsc_xdp_get(nvdev);
+
+ if (prog)
+ return prog->aux->id;
+
+ return 0;
+}
+
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct net_device_context *ndevctx = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct netlink_ext_ack *extack = bpf->extack;
+ int ret;
+
+ if (!nvdev || nvdev->destroy) {
+ if (bpf->command == XDP_QUERY_PROG) {
+ bpf->prog_id = 0;
+ return 0; /* Query must always succeed */
+ } else {
+ return -ENODEV;
+ }
+ }
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
+
+ if (ret)
+ return ret;
+
+ ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
+
+ if (ret) {
+ netdev_err(dev, "vf_setxdp failed:%d\n", ret);
+ NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
+
+ netvsc_xdp_set(dev, NULL, extack, nvdev);
+ }
+
+ return ret;
+
+ case XDP_QUERY_PROG:
+ bpf->prog_id = netvsc_xdp_query(nvdev);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
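To exercise the new ndo_bpf path end to end, any minimal XDP object will do. A hedged example (file name, program name, and build command are illustrative, not part of this patch):

/* xdp_drop.c - drop every frame, bumping the rx xdp_drop counter.
 * Assumed build: clang -O2 -target bpf -c xdp_drop.c -o xdp_drop.o
 */
#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

__section("xdp")
int xdp_drop_prog(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] __section("license") = "GPL";

Attached with e.g. ip link set dev eth0 xdp obj xdp_drop.o sec xdp (eth0 being a placeholder), every received frame should show up in the per-queue rx xdp_drop ethtool statistic added below.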
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f3f9eb8a402a..8fc71bd49894 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
+#include <linux/bpf.h>
#include <net/arp.h>
#include <net/route.h>
@@ -519,7 +520,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
return rc;
}
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet = NULL;
@@ -686,7 +687,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net, packet, rndis_msg, pb, skb);
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
if (likely(ret == 0))
return NETDEV_TX_OK;
@@ -709,6 +710,11 @@ no_memory:
goto drop;
}
+static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ return netvsc_xmit(skb, ndev, false);
+}
+
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@@ -751,6 +757,22 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+
+ skb->queue_mapping = skb_get_rx_queue(skb);
+ __skb_push(skb, ETH_HLEN);
+
+ rc = netvsc_xmit(skb, ndev, true);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
struct iphdr *iph = (struct iphdr *)skb->data;
@@ -760,7 +782,8 @@ static void netvsc_comp_ipcsum(struct sk_buff *skb)
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct netvsc_channel *nvchan)
+ struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
@@ -768,18 +791,37 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
nvchan->rsc.csum_info;
const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
+ void *xbuf = xdp->data_hard_start;
int i;
- skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
- if (!skb)
- return skb;
+ if (xbuf) {
+ unsigned int hdroom = xdp->data - xdp->data_hard_start;
+ unsigned int xlen = xdp->data_end - xdp->data;
+ unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
- /*
- * Copy to skb. This copy is needed here since the memory pointed by
- * hv_netvsc_packet cannot be deallocated
- */
- for (i = 0; i < nvchan->rsc.cnt; i++)
- skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);
+ skb = build_skb(xbuf, frag_size);
+
+ if (!skb) {
+ __free_page(virt_to_page(xbuf));
+ return NULL;
+ }
+
+ skb_reserve(skb, hdroom);
+ skb_put(skb, xlen);
+ skb->dev = napi->dev;
+ } else {
+ skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
+
+ if (!skb)
+ return NULL;
+
+ /* Copy to skb. This copy is needed here since the memory
+ * pointed by hv_netvsc_packet cannot be deallocated.
+ */
+ for (i = 0; i < nvchan->rsc.cnt; i++)
+ skb_put_data(skb, nvchan->rsc.data[i],
+ nvchan->rsc.len[i]);
+ }
skb->protocol = eth_type_trans(skb, net);
@@ -829,13 +871,25 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats;
+ struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct xdp_buff xdp;
+ u32 act;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
+ act = netvsc_run_xdp(net, nvchan, &xdp);
+
+ if (act != XDP_PASS && act != XDP_TX) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return NVSP_STAT_SUCCESS; /* consumed by XDP */
+ }
+
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, nvchan);
+ skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
@@ -849,7 +903,6 @@ int netvsc_recv_callback(struct net_device *net,
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -860,6 +913,11 @@ int netvsc_recv_callback(struct net_device *net,
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
+ if (act == XDP_TX) {
+ netvsc_xdp_xmit(skb, net);
+ return NVSP_STAT_SUCCESS;
+ }
+
napi_gro_receive(&nvchan->napi, skb);
return NVSP_STAT_SUCCESS;
}
@@ -886,10 +944,11 @@ static void netvsc_get_channels(struct net_device *net,
/* Alloc struct netvsc_device_info, and initialize it from either existing
* struct netvsc_device, or from default values.
*/
-static struct netvsc_device_info *netvsc_devinfo_get
- (struct netvsc_device *nvdev)
+static
+struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
struct netvsc_device_info *dev_info;
+ struct bpf_prog *prog;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
@@ -897,6 +956,8 @@ static struct netvsc_device_info *netvsc_devinfo_get
return NULL;
if (nvdev) {
+ ASSERT_RTNL();
+
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
@@ -905,6 +966,12 @@ static struct netvsc_device_info *netvsc_devinfo_get
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
NETVSC_HASH_KEYLEN);
+
+ prog = netvsc_xdp_get(nvdev);
+ if (prog) {
+ bpf_prog_inc(prog);
+ dev_info->bprog = prog;
+ }
} else {
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
@@ -916,6 +983,17 @@ static struct netvsc_device_info *netvsc_devinfo_get
return dev_info;
}
+/* Free struct netvsc_device_info */
+static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
+{
+ if (dev_info->bprog) {
+ ASSERT_RTNL();
+ bpf_prog_put(dev_info->bprog);
+ }
+
+ kfree(dev_info);
+}
+
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@@ -927,6 +1005,8 @@ static int netvsc_detach(struct net_device *ndev,
if (cancel_work_sync(&nvdev->subchan_work))
nvdev->num_chn = 1;
+ netvsc_xdp_set(ndev, NULL, NULL, nvdev);
+
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netvsc_tx_disable(nvdev, ndev);
@@ -960,7 +1040,8 @@ static int netvsc_attach(struct net_device *ndev,
struct hv_device *hdev = ndev_ctx->device_ctx;
struct netvsc_device *nvdev;
struct rndis_device *rdev;
- int ret;
+ struct bpf_prog *prog;
+ int ret = 0;
nvdev = rndis_filter_device_add(hdev, dev_info);
if (IS_ERR(nvdev))
@@ -976,6 +1057,13 @@ static int netvsc_attach(struct net_device *ndev,
}
}
+ prog = dev_info->bprog;
+ if (prog) {
+ ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
+ if (ret)
+ goto err1;
+ }
+
/* In any case device is now ready */
netif_device_attach(ndev);
@@ -985,7 +1073,7 @@ static int netvsc_attach(struct net_device *ndev,
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
- goto err;
+ goto err2;
rdev = nvdev->extension;
if (!rdev->link_state)
@@ -994,9 +1082,10 @@ static int netvsc_attach(struct net_device *ndev,
return 0;
-err:
+err2:
netif_device_detach(ndev);
+err1:
rndis_filter_device_remove(hdev, nvdev);
return ret;
@@ -1046,7 +1135,7 @@ static int netvsc_set_channels(struct net_device *net,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1153,7 +1242,7 @@ rollback_vf:
dev_set_mtu(vf_netdev, orig_mtu);
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1378,8 +1467,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 4 statistics per queue (rx/tx packets/bytes) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1411,6 +1500,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_drop;
int i, j, cpu;
if (!nvdev)
@@ -1439,9 +1529,11 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
+ xdp_drop = qstats->xdp_drop;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1489,6 +1581,8 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
}
for_each_present_cpu(cpu) {
@@ -1785,10 +1879,27 @@ static int netvsc_set_ringparam(struct net_device *ndev,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
+static netdev_features_t netvsc_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+
+ if (!nvdev || nvdev->destroy)
+ return features;
+
+ if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
+ features ^= NETIF_F_LRO;
+ netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
+ }
+
+ return features;
+}
+
static int netvsc_set_features(struct net_device *ndev,
netdev_features_t features)
{
@@ -1875,12 +1986,14 @@ static const struct net_device_ops device_ops = {
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
+ .ndo_fix_features = netvsc_fix_features,
.ndo_set_features = netvsc_set_features,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
+ .ndo_bpf = netvsc_bpf,
};
/*
@@ -2167,6 +2280,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
+ struct bpf_prog *prog;
struct net_device *ndev;
int ret;
@@ -2211,6 +2325,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
+ prog = netvsc_xdp_get(netvsc_dev);
+ netvsc_vf_setxdp(vf_netdev, prog);
+
return NOTIFY_OK;
}
@@ -2252,6 +2369,8 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+ netvsc_vf_setxdp(vf_netdev, NULL);
+
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2363,14 +2482,14 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
@@ -2398,8 +2517,10 @@ static int netvsc_remove(struct hv_device *dev)
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
- if (nvdev)
+ if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
+ netvsc_xdp_set(net, NULL, NULL, nvdev);
+ }
/*
* Call to the vsc driver to let it know that the device is being
@@ -2472,11 +2593,11 @@ static int netvsc_resume(struct hv_device *dev)
ret = netvsc_attach(net, device_info);
- rtnl_unlock();
-
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
net_device_ctx->saved_netvsc_dev_info = NULL;
+ rtnl_unlock();
+
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
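The reworked receive path wraps the XDP page with build_skb() rather than copying into a fresh skb. The sizing invariant that makes this safe: headroom plus payload plus the trailing struct skb_shared_info must fit within the page, which is exactly what the MTU check in netvsc_xdp_set() guarantees. Restated as a sketch:

unsigned int hdroom = xdp->data - xdp->data_hard_start;
unsigned int xlen = xdp->data_end - xdp->data;
unsigned int frag_size = SKB_DATA_ALIGN(hdroom + xlen) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* netvsc_xdp_set() rejected any program for which
 * NETVSC_XDP_HDRM + netvsc_xdp_fraglen(mtu + ETH_HLEN) > PAGE_SIZE,
 * so frag_size <= PAGE_SIZE holds when we get here. */
skb = build_skb(xdp->data_hard_start, frag_size);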
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 857c4bea451c..b81ceba38218 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -235,7 +235,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
rcu_read_unlock_bh();
return ret;
@@ -1443,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
/* Halt and release the rndis device */
rndis_filter_halt_device(net_dev, rndis_dev);
- net_dev->extension = NULL;
-
netvsc_device_remove(dev);
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index afd8b2a08245..45bfd99f17fa 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -11,16 +11,17 @@
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
+#include <net/macsec.h>
+#include <linux/phy.h>
#include <uapi/linux/if_macsec.h>
-typedef u64 __bitwise sci_t;
-
#define MACSEC_SCI_LEN 8
/* SecTAG length = macsec_eth_header without the optional SCI */
@@ -58,8 +59,6 @@ struct macsec_eth_header {
#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16
-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
-
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
sc; \
@@ -77,49 +76,6 @@ struct gcm_iv {
__be32 pn;
};
-/**
- * struct macsec_key - SA key
- * @id: user-provided key identifier
- * @tfm: crypto struct, key storage
- */
-struct macsec_key {
- u8 id[MACSEC_KEYID_LEN];
- struct crypto_aead *tfm;
-};
-
-struct macsec_rx_sc_stats {
- __u64 InOctetsValidated;
- __u64 InOctetsDecrypted;
- __u64 InPktsUnchecked;
- __u64 InPktsDelayed;
- __u64 InPktsOK;
- __u64 InPktsInvalid;
- __u64 InPktsLate;
- __u64 InPktsNotValid;
- __u64 InPktsNotUsingSA;
- __u64 InPktsUnusedSA;
-};
-
-struct macsec_rx_sa_stats {
- __u32 InPktsOK;
- __u32 InPktsInvalid;
- __u32 InPktsNotValid;
- __u32 InPktsNotUsingSA;
- __u32 InPktsUnusedSA;
-};
-
-struct macsec_tx_sa_stats {
- __u32 OutPktsProtected;
- __u32 OutPktsEncrypted;
-};
-
-struct macsec_tx_sc_stats {
- __u64 OutPktsProtected;
- __u64 OutPktsEncrypted;
- __u64 OutOctetsProtected;
- __u64 OutOctetsEncrypted;
-};
-
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
@@ -131,124 +87,8 @@ struct macsec_dev_stats {
__u64 InPktsOverrun;
};
-/**
- * struct macsec_rx_sa - receive secure association
- * @active:
- * @next_pn: packet number expected for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_rx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_rx_sa_stats __percpu *stats;
- struct macsec_rx_sc *sc;
- struct rcu_head rcu;
-};
-
-struct pcpu_rx_sc_stats {
- struct macsec_rx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_rx_sc - receive secure channel
- * @sci: secure channel identifier for this SC
- * @active: channel is active
- * @sa: array of secure associations
- * @stats: per-SC stats
- */
-struct macsec_rx_sc {
- struct macsec_rx_sc __rcu *next;
- sci_t sci;
- bool active;
- struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_rx_sc_stats __percpu *stats;
- refcount_t refcnt;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct macsec_tx_sa - transmit secure association
- * @active:
- * @next_pn: packet number to use for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_tx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_tx_sa_stats __percpu *stats;
- struct rcu_head rcu;
-};
-
-struct pcpu_tx_sc_stats {
- struct macsec_tx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_tx_sc - transmit secure channel
- * @active:
- * @encoding_sa: association number of the SA currently in use
- * @encrypt: encrypt packets on transmit, or authenticate only
- * @send_sci: always include the SCI in the SecTAG
- * @end_station:
- * @scb: single copy broadcast flag
- * @sa: array of secure associations
- * @stats: stats for this TXSC
- */
-struct macsec_tx_sc {
- bool active;
- u8 encoding_sa;
- bool encrypt;
- bool send_sci;
- bool end_station;
- bool scb;
- struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_tx_sc_stats __percpu *stats;
-};
-
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
-/**
- * struct macsec_secy - MACsec Security Entity
- * @netdev: netdevice for this SecY
- * @n_rx_sc: number of receive secure channels configured on this SecY
- * @sci: secure channel identifier used for tx
- * @key_len: length of keys used by the cipher suite
- * @icv_len: length of ICV used by the cipher suite
- * @validate_frames: validation mode
- * @operational: MAC_Operational flag
- * @protect_frames: enable protection for this SecY
- * @replay_protect: enable packet number checks on receive
- * @replay_window: size of the replay window
- * @tx_sc: transmit secure channel
- * @rx_sc: linked list of receive secure channels
- */
-struct macsec_secy {
- struct net_device *netdev;
- unsigned int n_rx_sc;
- sci_t sci;
- u16 key_len;
- u16 icv_len;
- enum macsec_validation_type validate_frames;
- bool operational;
- bool protect_frames;
- bool replay_protect;
- u32 replay_window;
- struct macsec_tx_sc tx_sc;
- struct macsec_rx_sc __rcu *rx_sc;
-};
-
struct pcpu_secy_stats {
struct macsec_dev_stats stats;
struct u64_stats_sync syncp;
@@ -260,6 +100,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -267,6 +108,7 @@ struct macsec_dev {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
+ enum macsec_offload offload;
};
/**
@@ -480,6 +322,56 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
h->short_length = data_len;
}
+/* Checks if a MACsec interface is being offloaded to a hardware engine */
+static bool macsec_is_offloaded(struct macsec_dev *macsec)
+{
+ if (macsec->offload == MACSEC_OFFLOAD_PHY)
+ return true;
+
+ return false;
+}
+
+/* Checks if underlying layers implement MACsec offloading functions. */
+static bool macsec_check_offload(enum macsec_offload offload,
+ struct macsec_dev *macsec)
+{
+ if (!macsec || !macsec->real_dev)
+ return false;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ return macsec->real_dev->phydev &&
+ macsec->real_dev->phydev->macsec_ops;
+
+ return false;
+}
+
+static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
+ struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (ctx) {
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->offload = offload;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ ctx->phydev = macsec->real_dev->phydev;
+ }
+
+ return macsec->real_dev->phydev->macsec_ops;
+}
+
+/* Returns a pointer to the MACsec ops struct if any, and updates the MACsec
+ * context device reference if provided.
+ */
+static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (!macsec_check_offload(macsec->offload, macsec))
+ return NULL;
+
+ return __macsec_get_ops(macsec->offload, macsec, ctx);
+}
+
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
@@ -532,6 +424,23 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
+static void __macsec_pn_wrapped(struct macsec_secy *secy,
+ struct macsec_tx_sa *tx_sa)
+{
+ pr_debug("PN wrapped, transitioning to !oper\n");
+ tx_sa->active = false;
+ if (secy->protect_frames)
+ secy->operational = false;
+}
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
+{
+ spin_lock_bh(&tx_sa->lock);
+ __macsec_pn_wrapped(secy, tx_sa);
+ spin_unlock_bh(&tx_sa->lock);
+}
+EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
+
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
u32 pn;
@@ -540,12 +449,8 @@ static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
pn = tx_sa->next_pn;
tx_sa->next_pn++;
- if (tx_sa->next_pn == 0) {
- pr_debug("PN wrapped, transitioning to !oper\n");
- tx_sa->active = false;
- if (secy->protect_frames)
- secy->operational = false;
- }
+ if (tx_sa->next_pn == 0)
+ __macsec_pn_wrapped(secy, tx_sa);
spin_unlock_bh(&tx_sa->lock);
return pn;
@@ -1029,8 +934,10 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
return NULL;
}
-static void handle_not_macsec(struct sk_buff *skb)
+static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
+ /* Deliver to the uncontrolled port by default */
+ enum rx_handler_result ret = RX_HANDLER_PASS;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
@@ -1045,7 +952,8 @@ static void handle_not_macsec(struct sk_buff *skb)
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
- if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ if (!macsec_is_offloaded(macsec) &&
+ macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1064,9 +972,17 @@ static void handle_not_macsec(struct sk_buff *skb)
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
+
+ if (netif_running(macsec->secy.netdev) &&
+ macsec_is_offloaded(macsec)) {
+ ret = RX_HANDLER_EXACT;
+ goto out;
+ }
}
+out:
rcu_read_unlock();
+ return ret;
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
@@ -1091,12 +1007,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
goto drop_direct;
hdr = macsec_ethhdr(skb);
- if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
- handle_not_macsec(skb);
-
- /* and deliver to the uncontrolled port */
- return RX_HANDLER_PASS;
- }
+ if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
+ return handle_not_macsec(skb);
skb = skb_unshare(skb, GFP_ATOMIC);
*pskb = skb;
@@ -1585,6 +1497,7 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
+ [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
@@ -1602,6 +1515,44 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
.len = MACSEC_MAX_KEY_LEN, },
};
+static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
+ [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+};
+
+/* Offloads an operation to a device driver */
+static int macsec_offload(int (* const func)(struct macsec_context *),
+ struct macsec_context *ctx)
+{
+ int ret;
+
+ if (unlikely(!func))
+ return 0;
+
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_lock(&ctx->phydev->lock);
+
+ /* Phase I: prepare. The driver should fail here if there are going to be
+ * issues in the commit phase.
+ */
+ ctx->prepare = true;
+ ret = (*func)(ctx);
+ if (ret)
+ goto phy_unlock;
+
+ /* Phase II: commit. This step cannot fail. */
+ ctx->prepare = false;
+ ret = (*func)(ctx);
+ /* This should never happen: commit is not allowed to fail */
+ if (unlikely(ret))
+ WARN(1, "MACsec offloading commit failed (%d)\n", ret);
+
+phy_unlock:
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_unlock(&ctx->phydev->lock);
+
+ return ret;
+}
+
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
if (!attrs[MACSEC_ATTR_SA_CONFIG])
@@ -1717,13 +1668,40 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
- nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rx_sa->sc = rx_sc;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ kfree(rx_sa);
+ rtnl_unlock();
+ return err;
}
static bool validate_add_rxsc(struct nlattr **attrs)
@@ -1746,6 +1724,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1771,12 +1751,35 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ rx_sc->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static bool validate_add_txsa(struct nlattr **attrs)
@@ -1813,6 +1816,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational;
int err;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1863,8 +1867,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return err;
}
- nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
-
spin_lock_bh(&tx_sa->lock);
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
@@ -1872,14 +1874,43 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ secy->operational = was_operational;
+ kfree(tx_sa);
+ rtnl_unlock();
+ return err;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -1892,6 +1923,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1915,12 +1947,35 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
clear_rx_sa(rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -1931,6 +1986,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
sci_t sci;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1957,10 +2013,31 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+ ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
free_rx_sc(rx_sc);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
@@ -1972,6 +2049,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1992,12 +2070,35 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_del_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
clear_tx_sa(tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static bool validate_upd_sa(struct nlattr **attrs)
@@ -2030,6 +2131,9 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational, was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2050,19 +2154,52 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&tx_sa->lock);
+ prev_pn = tx_sa->next_pn;
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
+ was_active = tx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa)
secy->operational = tx_sa->active;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+ tx_sa->active = was_active;
+ secy->operational = was_operational;
+ rtnl_unlock();
+ return ret;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -2075,6 +2212,9 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2098,15 +2238,46 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
+ prev_pn = rx_sa->next_pn;
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
+ was_active = rx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+ rx_sa->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -2116,6 +2287,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ unsigned int prev_n_rx_sc;
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2133,6 +2307,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
+ prev_n_rx_sc = secy->n_rx_sc;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
@@ -2142,9 +2318,153 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
rx_sc->active = new;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+
+cleanup:
+ secy->n_rx_sc = prev_n_rx_sc;
+ rx_sc->active = was_active;
rtnl_unlock();
+ return ret;
+}
+
+static bool macsec_is_configured(struct macsec_dev *macsec)
+{
+ struct macsec_secy *secy = &macsec->secy;
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ int i;
+
+ if (secy->n_rx_sc > 0)
+ return true;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++)
+ if (tx_sc->sa[i])
+ return true;
+
+ return false;
+}
+
+static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
+ enum macsec_offload offload, prev_offload;
+ int (*func)(struct macsec_context *ctx);
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev, *loop_dev;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ struct net *loop_net;
+ int ret;
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (!attrs[MACSEC_ATTR_OFFLOAD])
+ return -EINVAL;
+
+ if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
+ attrs[MACSEC_ATTR_OFFLOAD],
+ macsec_genl_offload_policy, NULL))
+ return -EINVAL;
+
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ macsec = macsec_priv(dev);
+
+ offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ if (macsec->offload == offload)
+ return 0;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ if (offload == MACSEC_OFFLOAD_OFF)
+ goto skip_limitation;
+ /* Check the physical interface isn't offloading another interface
+ * first.
+ */
+ for_each_net(loop_net) {
+ for_each_netdev(loop_net, loop_dev) {
+ struct macsec_dev *priv;
+
+ if (!netif_is_macsec(loop_dev))
+ continue;
+
+ priv = macsec_priv(loop_dev);
+
+ if (priv->real_dev == macsec->real_dev &&
+ priv->offload != MACSEC_OFFLOAD_OFF)
+ return -EBUSY;
+ }
+ }
+
+skip_limitation:
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ rtnl_lock();
+
+ prev_offload = macsec->offload;
+ macsec->offload = offload;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec)) {
+ ret = -EBUSY;
+ goto rollback;
+ }
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto rollback;
+ }
+
+ if (prev_offload == MACSEC_OFFLOAD_OFF)
+ func = ops->mdo_add_secy;
+ else
+ func = ops->mdo_del_secy;
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(func, &ctx);
+ if (ret)
+ goto rollback;
+
+ rtnl_unlock();
return 0;
+
+rollback:
+ macsec->offload = prev_offload;
+
+ rtnl_unlock();
+ return ret;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
@@ -2408,12 +2728,13 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
- struct macsec_rx_sc *rx_sc;
+ struct macsec_dev *macsec = netdev_priv(dev);
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
- int i, j;
- void *hdr;
+ struct macsec_rx_sc *rx_sc;
struct nlattr *attr;
+ void *hdr;
+ int i, j;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
@@ -2425,6 +2746,13 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
+ if (!attr)
+ goto nla_put_failure;
+ if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
+ goto nla_put_failure;
+ nla_nest_end(skb, attr);
+
if (nla_put_secy(secy, skb))
goto nla_put_failure;
@@ -2690,6 +3018,12 @@ static const struct genl_ops macsec_genl_ops[] = {
.doit = macsec_upd_rxsa,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = MACSEC_CMD_UPD_OFFLOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = macsec_upd_offload,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family macsec_fam __ro_after_init = {
@@ -2712,6 +3046,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct pcpu_secy_stats *secy_stats;
int ret, len;
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ skb->dev = macsec->real_dev;
+ return dev_queue_xmit(skb);
+ }
+
/* 10.5 */
if (!secy->protect_frames) {
secy_stats = this_cpu_ptr(macsec->stats);
@@ -2825,6 +3164,22 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto clear_allmulti;
+ }
+
+ err = macsec_offload(ops->mdo_dev_open, &ctx);
+ if (err)
+ goto clear_allmulti;
+ }
+
if (netif_carrier_ok(real_dev))
netif_carrier_on(dev);
@@ -2845,6 +3200,16 @@ static int macsec_dev_stop(struct net_device *dev)
netif_carrier_off(dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops)
+ macsec_offload(ops->mdo_dev_stop, &ctx);
+ }
+
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
@@ -3076,6 +3441,11 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_tx_sc tx_sc;
+ struct macsec_secy secy;
+ int ret;
+
if (!data)
return 0;
@@ -3085,7 +3455,41 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- return macsec_changelink_common(dev, data);
+ /* Keep a copy of unmodified secy and tx_sc, in case the offload
+ * propagation fails, to revert macsec_changelink_common.
+ */
+ memcpy(&secy, &macsec->secy, sizeof(secy));
+ memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
+
+ ret = macsec_changelink_common(dev, data);
+ if (ret)
+ return ret;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ int ret;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
+ memcpy(&macsec->secy, &secy, sizeof(secy));
+
+ return ret;
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3128,6 +3532,18 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -3239,6 +3655,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec->real_dev = real_dev;
+ /* MACsec offloading is off by default */
+ macsec->offload = MACSEC_OFFLOAD_OFF;
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
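On the driver side, each mdo_* hook wired up above is invoked twice by macsec_offload(): once with ctx->prepare set, where failure is allowed, and once to commit, where it is not. A hedged skeleton of a PHY driver implementing one hook (both helper names are hypothetical):

static int my_phy_mdo_add_rxsa(struct macsec_context *ctx)
{
	if (ctx->prepare)
		/* validate: free SA slot, supported cipher, key length */
		return my_phy_can_add_rxsa(ctx);	/* hypothetical */

	my_phy_write_rxsa(ctx);		/* commit: must not fail (hypothetical) */
	return 0;
}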
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 747c0542a53c..81aa7adf4801 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -259,7 +259,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
- const struct ethhdr *eth = skb_eth_hdr(skb);
+ const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;
@@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
- const struct ethhdr *eth = (void *)skb->data;
+ const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
goto xmit_world;
}
@@ -1036,8 +1037,8 @@ static int macvlan_ethtool_get_ts_info(struct net_device *dev,
const struct ethtool_ops *ops = real_dev->ethtool_ops;
struct phy_device *phydev = real_dev->phydev;
- if (phydev && phydev->drv && phydev->drv->ts_info) {
- return phydev->drv->ts_info(phydev, info);
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
} else if (ops->get_ts_info) {
return ops->get_ts_info(real_dev, info);
} else {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 059711edfc61..b53fbc06e104 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
- id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
dummy_data, id, kfree);
if (err) {
@@ -270,7 +270,7 @@ struct nsim_trap_data {
};
/* All driver-specific traps must be documented in
- * Documentation/networking/devlink-trap-netdevsim.rst
+ * Documentation/networking/devlink/netdevsim.rst
*/
enum {
NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 13540dee7364..f32d56ac3e80 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -14,6 +14,12 @@
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
+#include <linux/in6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -36,6 +42,48 @@ struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
+ struct rhashtable fib_rt_ht;
+ struct list_head fib_rt_list;
+ spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct devlink *devlink;
+};
+
+struct nsim_fib_rt_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+ int family;
+ u32 tb_id;
+};
+
+struct nsim_fib_rt {
+ struct nsim_fib_rt_key key;
+ struct rhash_head ht_node;
+ struct list_head list; /* Member of fib_rt_list */
+};
+
+struct nsim_fib4_rt {
+ struct nsim_fib_rt common;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+};
+
+struct nsim_fib6_rt {
+ struct nsim_fib_rt common;
+ struct list_head nh_list;
+ unsigned int nhs;
+};
+
+struct nsim_fib6_rt_nh {
+ struct list_head list; /* Member of nh_list */
+ struct fib6_info *rt;
+};
+
+static const struct rhashtable_params nsim_fib_rt_ht_params = {
+ .key_offset = offsetof(struct nsim_fib_rt, key),
+ .head_offset = offsetof(struct nsim_fib_rt, ht_node),
+ .key_len = sizeof(struct nsim_fib_rt_key),
+ .automatic_shrinking = true,
};
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
@@ -144,18 +192,556 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
+static void nsim_fib_rt_init(struct nsim_fib_data *data,
+ struct nsim_fib_rt *fib_rt, const void *addr,
+ size_t addr_len, unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ memcpy(fib_rt->key.addr, addr, addr_len);
+ fib_rt->key.prefix_len = prefix_len;
+ fib_rt->key.family = family;
+ fib_rt->key.tb_id = tb_id;
+ list_add(&fib_rt->list, &data->fib_rt_list);
+}
+
+static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt)
+{
+ list_del(&fib_rt->list);
+}
+
+static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht,
+ const void *addr, size_t addr_len,
+ unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ struct nsim_fib_rt_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ key.family = family;
+ key.tb_id = tb_id;
+
+ return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params);
+}
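nsim_fib_rt_ht_params gives rhashtable a fixed key_len covering the whole nsim_fib_rt_key, so lookups compare the key byte-for-byte; that is why both insertion (via kzalloc of the containing object) and nsim_fib_rt_lookup() (via the explicit memset) zero the key before filling it -- an IPv4 address occupies only 4 of the 16 addr bytes, and any stray bytes or struct padding would make otherwise-equal keys compare unequal. A small standalone demonstration of the hazard:

	#include <stdio.h>
	#include <string.h>

	struct key {
		unsigned char addr[16];	/* IPv4 uses only the first 4 bytes */
		unsigned char prefix_len;
		int family;		/* padding may sit before this field */
		unsigned int tb_id;
	};

	int main(void)
	{
		struct key a, b;

		/* Build one key without zeroing first: garbage in the
		 * padding and in addr[4..15] breaks a bytewise compare.
		 */
		memset(&a, 0xAA, sizeof(a));	/* simulate stack garbage */
		a.prefix_len = 24; a.family = 2; a.tb_id = 254;
		memcpy(a.addr, "\xc0\xa8\x01\x00", 4);

		/* Build the other the way nsim_fib_rt_lookup() does. */
		memset(&b, 0, sizeof(b));
		b.prefix_len = 24; b.family = 2; b.tb_id = 254;
		memcpy(b.addr, "\xc0\xa8\x01\x00", 4);

		printf("identical routes compare %s\n",
		       memcmp(&a, &b, sizeof(a)) == 0 ? "equal" : "unequal");
		return 0;
	}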
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_create(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_ATOMIC);
+ if (!fib4_rt)
+ return NULL;
+
+ nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET, fen_info->tb_id);
+
+ fib4_rt->fi = fen_info->fi;
+ fib_info_hold(fib4_rt->fi);
+ fib4_rt->tos = fen_info->tos;
+ fib4_rt->type = fen_info->type;
+
+ return fib4_rt;
+}
+
+static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt)
+{
+ fib_info_put(fib4_rt->fi);
+ nsim_fib_rt_fini(&fib4_rt->common);
+ kfree(fib4_rt);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET,
+ fen_info->tb_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib4_rt, common);
+}
+
+static void nsim_fib4_rt_hw_flags_set(struct net *net,
+ const struct nsim_fib4_rt *fib4_rt,
+ bool trap)
+{
+ u32 *p_dst = (u32 *) fib4_rt->common.key.addr;
+ int dst_len = fib4_rt->common.key.prefix_len;
+ struct fib_rt_info fri;
+
+ fri.fi = fib4_rt->fi;
+ fri.tb_id = fib4_rt->common.key.tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_rt->tos;
+ fri.type = fib4_rt->type;
+ fri.offload = false;
+ fri.trap = trap;
+ fib_alias_hw_flags_set(net, &fri);
+}
+
+static int nsim_fib4_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_fib_account(&data->ipv4.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv4 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct nsim_fib4_rt *fib4_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib4_rt_old->common.ht_node,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv4 route");
+ return err;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
+ nsim_fib4_rt_destroy(fib4_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
+ int err;
+
+ fib4_rt = nsim_fib4_rt_create(data, fen_info);
+ if (!fib4_rt)
+ return -ENOMEM;
+
+ fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (!fib4_rt_old)
+ err = nsim_fib4_rt_add(data, fib4_rt, extack);
+ else
+ err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old, extack);
+
+ if (err)
+ nsim_fib4_rt_destroy(fib4_rt);
+
+ return err;
+}
+
+static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (WARN_ON_ONCE(!fib4_rt))
+ return;
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static int nsim_fib4_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib_entry_notifier_info *fen_info;
+ int err = 0;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib4_rt_insert(data, fen_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib4_rt_remove(data, fen_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct nsim_fib6_rt_nh *
+nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) {
+ if (fib6_rt_nh->rt == rt)
+ return fib6_rt_nh;
+ }
+
+ return NULL;
+}
+
+static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
+ struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_ATOMIC);
+ if (!fib6_rt_nh)
+ return -ENOMEM;
+
+ fib6_info_hold(rt);
+ fib6_rt_nh->rt = rt;
+ list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list);
+ fib6_rt->nhs++;
+
+ return 0;
+}
+
+static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
+ if (WARN_ON_ONCE(!fib6_rt_nh))
+ return;
+
+ fib6_rt->nhs--;
+ list_del(&fib6_rt_nh->list);
+#if IS_ENABLED(CONFIG_IPV6)
+ fib6_info_release(fib6_rt_nh->rt);
+#endif
+ kfree(fib6_rt_nh);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_create(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC);
+ if (!fib6_rt)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+
+ /* We consider a multipath IPv6 route as one entry, but it can be made
+ * up of several fib6_info structs (one for each nexthop), so we
+ * add them all to the same list under the entry.
+ */
+ INIT_LIST_HEAD(&fib6_rt->nh_list);
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ goto err_fib_rt_fini;
+
+ if (!fen6_info->nsiblings)
+ return fib6_rt;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return fib6_rt;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings)
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+err_fib_rt_fini:
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+ return ERR_PTR(err);
+}
+
+static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt)
+{
+ struct nsim_fib6_rt_nh *iter, *tmp;
+
+ list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
+ nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
+ WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list));
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib6_rt, common);
+}
+
+static int nsim_fib6_rt_append(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
+ if (WARN_ON_ONCE(!fib6_rt))
+ return -EINVAL;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ return err;
+ rt->trap = true;
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ iter->trap = true;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings) {
+ iter->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ }
+ rt->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+ return err;
+}
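Both nsim_fib6_rt_create() and nsim_fib6_rt_append() unwind partial failure with list_for_each_entry_continue_reverse(): starting from the sibling that failed, they walk back over only the nexthops that were actually added and undo them, then undo the head route separately. The shape, reduced to a plain loop over an array (an illustration of the idiom, not the kernel macro itself):

	#include <stdio.h>

	#define N 5

	static int add_one(int i)
	{
		return i == 3 ? -1 : 0;	/* fail on the 4th element */
	}

	static void del_one(int i)
	{
		printf("undo %d\n", i);
	}

	int main(void)
	{
		int i, err = 0;

		for (i = 0; i < N; i++) {
			err = add_one(i);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		/* Walk back over the elements that did succeed, newest
		 * first -- the same shape as
		 * list_for_each_entry_continue_reverse() in the driver.
		 */
		while (--i >= 0)
			del_one(i);
		return 1;
	}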
+
+static void nsim_fib6_rt_hw_flags_set(const struct nsim_fib6_rt *fib6_rt,
+ bool trap)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
+ fib6_info_hw_flags_set(fib6_rt_nh->rt, false, trap);
+}
+
+static int nsim_fib6_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = nsim_fib_account(&data->ipv6.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv6 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct nsim_fib6_rt *fib6_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib6_rt_old->common.ht_node,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv6 route");
+ return err;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt_old, false);
+ nsim_fib6_rt_destroy(fib6_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_create(data, fen6_info);
+ if (IS_ERR(fib6_rt))
+ return PTR_ERR(fib6_rt);
+
+ fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt_old)
+ err = nsim_fib6_rt_add(data, fib6_rt, extack);
+ else
+ err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old, extack);
+
+ if (err)
+ nsim_fib6_rt_destroy(fib6_rt);
+
+ return err;
+}
+
+static void
+nsim_fib6_rt_remove(struct nsim_fib_data *data,
+ const struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt;
+
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if
+ * the route was not found.
+ */
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
+ */
+ if (fen6_info->nsiblings + 1 != fib6_rt->nhs) {
+ nsim_fib6_rt_nh_del(fib6_rt, fen6_info->rt);
+ return;
+ }
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static int nsim_fib6_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ int err = 0;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ if (fen6_info->rt->fib6_src.plen) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib6_rt_insert(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = nsim_fib6_rt_append(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib6_rt_remove(data, fen6_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+ struct fib_notifier_info *info, unsigned long event)
{
- struct netlink_ext_ack *extack = info->extack;
int err = 0;
switch (info->family) {
case AF_INET:
- err = nsim_fib_account(&data->ipv4.fib, add, extack);
+ err = nsim_fib4_event(data, info, event);
break;
case AF_INET6:
- err = nsim_fib_account(&data->ipv6.fib, add, extack);
+ err = nsim_fib6_event(data, info, event);
break;
}
@@ -170,6 +756,9 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
struct fib_notifier_info *info = ptr;
int err = 0;
+ /* IPv6 routes can be added via RAs from softIRQ. */
+ spin_lock_bh(&data->fib_lock);
+
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
@@ -177,25 +766,75 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
event == FIB_EVENT_RULE_ADD);
break;
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info,
- event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info, event);
break;
}
+ spin_unlock_bh(&data->fib_lock);
+
return notifier_from_errno(err);
}
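The spin_lock_bh() added above exists because the same notifier can fire both from process context and from softirq (IPv6 routes learned from Router Advertisements): a plain spin_lock() taken in process context could be interrupted on the same CPU by a softirq that tries to take the same lock, deadlocking; the _bh variant disables bottom halves for the duration. A loose userspace analogue, assuming signals as the stand-in for softirqs (illustrative only -- mutexes and printf in signal handlers are not generally safe):

	#include <pthread.h>
	#include <signal.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void locked_update(const char *who)
	{
		pthread_mutex_lock(&lock);
		printf("%s updated the table\n", who);
		pthread_mutex_unlock(&lock);
	}

	static void handler(int sig)
	{
		(void)sig;
		/* If this ran while main held `lock`, it would deadlock. */
		locked_update("softirq-like path");
	}

	int main(void)
	{
		sigset_t set;

		signal(SIGALRM, handler);

		/* Mask the "softirq" around the critical section,
		 * playing the role of spin_lock_bh().
		 */
		sigemptyset(&set);
		sigaddset(&set, SIGALRM);
		sigprocmask(SIG_BLOCK, &set, NULL);
		locked_update("process context");
		sigprocmask(SIG_UNBLOCK, &set, NULL);

		raise(SIGALRM);	/* deferred until unmasked */
		return 0;
	}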
+static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct devlink *devlink = data->devlink;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
+ nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
+ nsim_fib_account(&data->ipv4.fib, false, NULL);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct nsim_fib6_rt *fib6_rt;
+
+ fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
+ nsim_fib6_rt_hw_flags_set(fib6_rt, false);
+ nsim_fib_account(&data->ipv6.fib, false, NULL);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static void nsim_fib_rt_free(void *ptr, void *arg)
+{
+ struct nsim_fib_rt *fib_rt = ptr;
+ struct nsim_fib_data *data = arg;
+
+ switch (fib_rt->key.family) {
+ case AF_INET:
+ nsim_fib4_rt_free(fib_rt, data);
+ break;
+ case AF_INET6:
+ nsim_fib6_rt_free(fib_rt, data);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* The notifier block is still not registered, so we do not need to
+ * take any locks here.
+ */
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
- data->ipv4.fib.num = 0ULL;
data->ipv4.rules.num = 0ULL;
- data->ipv6.fib.num = 0ULL;
data->ipv6.rules.num = 0ULL;
}
@@ -256,6 +895,13 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
+ data->devlink = devlink;
+
+ spin_lock_init(&data->fib_lock);
+ INIT_LIST_HEAD(&data->fib_rt_list);
+ err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
+ if (err)
+ goto err_data_free;
nsim_fib_set_max_all(data, devlink);
@@ -264,7 +910,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
- goto err_out;
+ goto err_rhashtable_destroy;
}
devlink_resource_occ_get_register(devlink,
@@ -285,7 +931,10 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data);
return data;
-err_out:
+err_rhashtable_destroy:
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+err_data_free:
kfree(data);
return ERR_PTR(err);
}
@@ -301,5 +950,8 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+ WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
kfree(data);
}
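nsim_fib_create() and nsim_fib_destroy() above keep strict ordering: the rhashtable is initialised before the notifier is registered, and on teardown the notifier is unregistered before rhashtable_free_and_destroy() runs, so no notification can race with the free; the error path uses the usual goto ladder that releases resources in reverse order of acquisition. A compact sketch of that ladder, with hypothetical step names:

	#include <stdio.h>

	static int step(const char *what, int fail)
	{
		if (fail) {
			printf("%s failed\n", what);
			return -1;
		}
		printf("%s ok\n", what);
		return 0;
	}

	static void undo(const char *what)
	{
		printf("undo %s\n", what);
	}

	/* The classic kernel goto ladder: each label undoes everything
	 * acquired before the failing step, in reverse order.
	 */
	static int create(int fail_at)
	{
		if (step("alloc", fail_at == 0))
			goto err;
		if (step("rhashtable_init", fail_at == 1))
			goto err_free;
		if (step("register_notifier", fail_at == 2))
			goto err_rhashtable_destroy;
		return 0;

	err_rhashtable_destroy:
		undo("rhashtable_init");
	err_free:
		undo("alloc");
	err:
		return -1;
	}

	int main(void)
	{
		return create(2) ? 1 : 0;
	}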
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 5848219005d7..9dabe03a668c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -38,6 +38,7 @@ config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
help
This module provides a driver for the MDIO busses found in the
Broadcom iProc SoCs.
@@ -324,6 +325,12 @@ config BROADCOM_PHY
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM84881_PHY
+ bool "Broadcom BCM84881 PHY"
+ depends on PHYLIB=y
+ ---help---
+ Support the Broadcom BCM84881 PHY.
+
config CICADA_PHY
tristate "Cicada PHYs"
---help---
@@ -340,14 +347,15 @@ config DAVICOM_PHY
Currently supports dm9161e and dm9131
config DP83822_PHY
- tristate "Texas Instruments DP83822 PHY"
+ tristate "Texas Instruments DP83822/825/826 PHYs"
---help---
- Supports the DP83822 PHY.
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
config DP83TC811_PHY
- tristate "Texas Instruments DP83TC822 PHY"
+ tristate "Texas Instruments DP83TC811 PHY"
---help---
- Supports the DP83TC822 PHY.
+ Supports the DP83TC811 PHY.
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
@@ -431,6 +439,9 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
+ depends on MACSEC || MACSEC=n
+ select CRYPTO_AES
+ select CRYPTO_ECB
---help---
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b433ec3bf9a6..fe5badf13b65 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
+
obj-$(CONFIG_SFP) += sfp.o
sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
@@ -62,6 +64,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index cf5a391c93e6..c7eabe4382fb 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -145,7 +145,7 @@ struct adin_clause45_mmd_map {
u16 adin_regnum;
};
-static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
+static const struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
{ MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG },
@@ -159,7 +159,7 @@ struct adin_hw_stat {
u16 reg2;
};
-static struct adin_hw_stat adin_hw_stats[] = {
+static const struct adin_hw_stat adin_hw_stats[] = {
{ "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */
{ "length_error_frames_count", 0x940C },
{ "alignment_error_frames_count", 0x940D },
@@ -456,7 +456,7 @@ static int adin_phy_config_intr(struct phy_device *phydev)
static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
u16 cl45_regnum)
{
- struct adin_clause45_mmd_map *m;
+ const struct adin_clause45_mmd_map *m;
int i;
if (devad == MDIO_MMD_VEND1)
@@ -625,7 +625,7 @@ static int adin_soft_reset(struct phy_device *phydev)
if (rc < 0)
return rc;
- msleep(10);
+ msleep(20);
/* If we get a read error something may be wrong */
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
@@ -650,7 +650,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
}
static int adin_read_mmd_stat_regs(struct phy_device *phydev,
- struct adin_hw_stat *stat,
+ const struct adin_hw_stat *stat,
u32 *val)
{
int ret;
@@ -676,7 +676,7 @@ static int adin_read_mmd_stat_regs(struct phy_device *phydev,
static u64 adin_get_stat(struct phy_device *phydev, int i)
{
- struct adin_hw_stat *stat = &adin_hw_stats[i];
+ const struct adin_hw_stat *stat = &adin_hw_stats[i];
struct adin_priv *priv = phydev->priv;
u32 val;
int ret;
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 975789d9349d..31927b2c7d5a 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -358,9 +358,11 @@ static int aqr107_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
@@ -493,7 +495,8 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
new file mode 100644
index 000000000000..14d55a77eb28
--- /dev/null
+++ b/drivers/net/phy/bcm84881.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+// Broadcom BCM84881 NBASE-T PHY driver, as found on a SFP+ module.
+// Copyright (C) 2019 Russell King, Deep Blue Solutions Ltd.
+//
+// Like the Marvell 88x3310, the Broadcom 84881 changes its host-side
+// interface according to the operating speed between 10GBASE-R,
+// 2500BASE-X and SGMII (but unlike the 88x3310, without the control
+// word).
+//
+// This driver only supports those aspects of the PHY that I'm able to
+// observe and test with the SFP+ module, which is an incomplete subset
+// of what this PHY is able to support. For example, I only assume it
+// supports a single lane Serdes connection, but it may be that the PHY
+// is able to support more than that.
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+enum {
+ MDIO_AN_C22 = 0xffe0,
+};
+
+static int bcm84881_wait_init(struct phy_device *phydev)
+{
+ unsigned int tries = 20;
+ int ret, val;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (val < 0) {
+ ret = val;
+ break;
+ }
+ if (!(val & MDIO_CTRL1_RESET)) {
+ ret = 0;
+ break;
+ }
+ if (!--tries) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ msleep(100);
+ } while (1);
+
+ if (ret)
+ phydev_err(phydev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int bcm84881_config_init(struct phy_device *phydev)
+{
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int bcm84881_probe(struct phy_device *phydev)
+{
+ /* This driver requires PMAPMD and AN blocks */
+ const u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ if (!phydev->is_c45 ||
+ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int bcm84881_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* Although the PHY sets bit 1.11.8, it does not support 10M modes */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int bcm84881_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ /* Wait for the PHY to finish initialising, otherwise our
+ * advertisement may be overwritten.
+ */
+ ret = bcm84881_wait_init(phydev);
+ if (ret)
+ return ret;
+
+ /* We don't support manual MDI control */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* disabled autoneg doesn't seem to work with this PHY */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int bcm84881_aneg_done(struct phy_device *phydev)
+{
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ return !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+}
+
+static int bcm84881_read_status(struct phy_device *phydev)
+{
+ unsigned int mode;
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_CTRL1_RESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+ phydev->link = !!(val & MDIO_STAT1_LSTATUS) &&
+ !!(bmsr & BMSR_LSTATUS);
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = false;
+
+ if (!phydev->link)
+ return 0;
+
+ linkmode_zero(phydev->lp_advertising);
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->mdix = 0;
+
+ if (phydev->autoneg_complete) {
+ val = genphy_c45_read_lpa(phydev);
+ if (val < 0)
+ return val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_STAT1000);
+ if (val < 0)
+ return val;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ phy_resolve_aneg_linkmode(phydev);
+ }
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* disabled autoneg doesn't seem to work, so force the link
+ * down.
+ */
+ phydev->link = 0;
+ return 0;
+ }
+
+ /* Set the host link mode - we set the phy interface mode and
+ * the speed according to this register so that downshift works.
+ * We leave the duplex setting as resolved above.
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, 0x4011);
+ mode = (val & 0x1e) >> 1;
+ if (mode == 1 || mode == 2)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ else if (mode == 3)
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ else if (mode == 4)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ switch (mode & 7) {
+ case 1:
+ phydev->speed = SPEED_100;
+ break;
+ case 2:
+ phydev->speed = SPEED_1000;
+ break;
+ case 3:
+ phydev->speed = SPEED_10000;
+ break;
+ case 4:
+ phydev->speed = SPEED_2500;
+ break;
+ case 5:
+ phydev->speed = SPEED_5000;
+ break;
+ }
+
+ return genphy_c45_read_mdix(phydev);
+}
+
+static struct phy_driver bcm84881_drivers[] = {
+ {
+ .phy_id = 0xae025150,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM84881",
+ .config_init = bcm84881_config_init,
+ .probe = bcm84881_probe,
+ .get_features = bcm84881_get_features,
+ .config_aneg = bcm84881_config_aneg,
+ .aneg_done = bcm84881_aneg_done,
+ .read_status = bcm84881_read_status,
+ },
+};
+
+module_phy_driver(bcm84881_drivers);
+
+/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
+static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+ { 0xae025150, 0xfffffff0 },
+ { },
+};
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Broadcom BCM84881 PHY driver");
+MODULE_DEVICE_TABLE(mdio, bcm84881_tbl);
+MODULE_LICENSE("GPL");
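The MDIO_AN_C22 = 0xffe0 constant reflects how this PHY shadows the familiar Clause 22 registers inside the Clause 45 AN MMD at a fixed offset, so the driver reads e.g. the BMSR as MDIO_AN_C22 + MII_BMSR. A trivial sketch of the resulting register addresses (the offset comes from this driver; treating it as a general Broadcom convention is an assumption):

	#include <stdio.h>

	#define MDIO_AN_C22	0xffe0	/* vendor mapping, per the driver */
	#define MII_BMSR	0x01
	#define MII_CTRL1000	0x09
	#define MII_STAT1000	0x0a

	int main(void)
	{
		/* Clause 45 addresses of the shadowed C22 registers */
		printf("BMSR     -> AN MMD reg 0x%04x\n", MDIO_AN_C22 + MII_BMSR);
		printf("CTRL1000 -> AN MMD reg 0x%04x\n", MDIO_AN_C22 + MII_CTRL1000);
		printf("STAT1000 -> AN MMD reg 0x%04x\n", MDIO_AN_C22 + MII_STAT1000);
		return 0;
	}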
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 8f241b57fcf6..ac72a324fcd1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -98,6 +98,7 @@ struct dp83640_private {
struct list_head list;
struct dp83640_clock *clock;
struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
struct delayed_work ts_work;
int hwts_tx_en;
int hwts_rx_en;
@@ -1131,96 +1132,6 @@ static void dp83640_clock_put(struct dp83640_clock *clock)
mutex_unlock(&clock->clock_lock);
}
-static int dp83640_probe(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct dp83640_private *dp83640;
- int err = -ENOMEM, i;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return 0;
-
- clock = dp83640_clock_get_bus(phydev->mdio.bus);
- if (!clock)
- goto no_clock;
-
- dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
- if (!dp83640)
- goto no_memory;
-
- dp83640->phydev = phydev;
- INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
-
- INIT_LIST_HEAD(&dp83640->rxts);
- INIT_LIST_HEAD(&dp83640->rxpool);
- for (i = 0; i < MAX_RXTS; i++)
- list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
-
- phydev->priv = dp83640;
-
- spin_lock_init(&dp83640->rx_lock);
- skb_queue_head_init(&dp83640->rx_queue);
- skb_queue_head_init(&dp83640->tx_queue);
-
- dp83640->clock = clock;
-
- if (choose_this_phy(clock, phydev)) {
- clock->chosen = dp83640;
- clock->ptp_clock = ptp_clock_register(&clock->caps,
- &phydev->mdio.dev);
- if (IS_ERR(clock->ptp_clock)) {
- err = PTR_ERR(clock->ptp_clock);
- goto no_register;
- }
- } else
- list_add_tail(&dp83640->list, &clock->phylist);
-
- dp83640_clock_put(clock);
- return 0;
-
-no_register:
- clock->chosen = NULL;
- kfree(dp83640);
-no_memory:
- dp83640_clock_put(clock);
-no_clock:
- return err;
-}
-
-static void dp83640_remove(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
- struct dp83640_private *tmp, *dp83640 = phydev->priv;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return;
-
- enable_status_frames(phydev, false);
- cancel_delayed_work_sync(&dp83640->ts_work);
-
- skb_queue_purge(&dp83640->rx_queue);
- skb_queue_purge(&dp83640->tx_queue);
-
- clock = dp83640_clock_get(dp83640->clock);
-
- if (dp83640 == clock->chosen) {
- ptp_clock_unregister(clock->ptp_clock);
- clock->chosen = NULL;
- } else {
- list_for_each_safe(this, next, &clock->phylist) {
- tmp = list_entry(this, struct dp83640_private, list);
- if (tmp == dp83640) {
- list_del_init(&tmp->list);
- break;
- }
- }
- }
-
- dp83640_clock_put(clock);
- kfree(dp83640);
-}
-
static int dp83640_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -1319,9 +1230,10 @@ static int dp83640_config_intr(struct phy_device *phydev)
}
}
-static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct hwtstamp_config cfg;
u16 txcfg0, rxcfg0;
@@ -1397,8 +1309,8 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
- ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_TXCFG0, txcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_RXCFG0, rxcfg0);
mutex_unlock(&dp83640->clock->extreg_lock);
@@ -1428,10 +1340,11 @@ static void rx_timestamp_work(struct work_struct *work)
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
}
-static bool dp83640_rxtstamp(struct phy_device *phydev,
+static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct list_head *this, *next;
struct rxts *rxts;
@@ -1477,11 +1390,12 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
return true;
}
-static void dp83640_txtstamp(struct phy_device *phydev,
+static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
switch (dp83640->hwts_tx_en) {
@@ -1504,9 +1418,11 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
}
-static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+static int dp83640_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
{
- struct dp83640_private *dp83640 = dev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1526,6 +1442,103 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
return 0;
}
+static int dp83640_probe(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct dp83640_private *dp83640;
+ int err = -ENOMEM, i;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return 0;
+
+ clock = dp83640_clock_get_bus(phydev->mdio.bus);
+ if (!clock)
+ goto no_clock;
+
+ dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
+ if (!dp83640)
+ goto no_memory;
+
+ dp83640->phydev = phydev;
+ dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
+ dp83640->mii_ts.txtstamp = dp83640_txtstamp;
+ dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
+ dp83640->mii_ts.ts_info = dp83640_ts_info;
+
+ INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
+ INIT_LIST_HEAD(&dp83640->rxts);
+ INIT_LIST_HEAD(&dp83640->rxpool);
+ for (i = 0; i < MAX_RXTS; i++)
+ list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
+
+ phydev->mii_ts = &dp83640->mii_ts;
+ phydev->priv = dp83640;
+
+ spin_lock_init(&dp83640->rx_lock);
+ skb_queue_head_init(&dp83640->rx_queue);
+ skb_queue_head_init(&dp83640->tx_queue);
+
+ dp83640->clock = clock;
+
+ if (choose_this_phy(clock, phydev)) {
+ clock->chosen = dp83640;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto no_register;
+ }
+ } else
+ list_add_tail(&dp83640->list, &clock->phylist);
+
+ dp83640_clock_put(clock);
+ return 0;
+
+no_register:
+ clock->chosen = NULL;
+ kfree(dp83640);
+no_memory:
+ dp83640_clock_put(clock);
+no_clock:
+ return err;
+}
+
+static void dp83640_remove(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct list_head *this, *next;
+ struct dp83640_private *tmp, *dp83640 = phydev->priv;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return;
+
+ phydev->mii_ts = NULL;
+
+ enable_status_frames(phydev, false);
+ cancel_delayed_work_sync(&dp83640->ts_work);
+
+ skb_queue_purge(&dp83640->rx_queue);
+ skb_queue_purge(&dp83640->tx_queue);
+
+ clock = dp83640_clock_get(dp83640->clock);
+
+ if (dp83640 == clock->chosen) {
+ ptp_clock_unregister(clock->ptp_clock);
+ clock->chosen = NULL;
+ } else {
+ list_for_each_safe(this, next, &clock->phylist) {
+ tmp = list_entry(this, struct dp83640_private, list);
+ if (tmp == dp83640) {
+ list_del_init(&tmp->list);
+ break;
+ }
+ }
+ }
+
+ dp83640_clock_put(clock);
+ kfree(dp83640);
+}
+
static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
@@ -1537,10 +1550,6 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
- .ts_info = dp83640_ts_info,
- .hwtstamp = dp83640_hwtstamp,
- .rxtstamp = dp83640_rxtstamp,
- .txtstamp = dp83640_txtstamp,
};
static int __init dp83640_init(void)
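The dp83640 conversion above embeds a struct mii_timestamper in the driver's private data and recovers the private struct in each callback with container_of(), instead of reaching through phydev->priv; this is what lets the timestamping ops move out of struct phy_driver. A self-contained model of the embedded-struct callback pattern, with stand-in types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mii_ts {			/* stands in for struct mii_timestamper */
		int (*hwtstamp)(struct mii_ts *ts, int cfg);
	};

	struct priv {			/* stands in for dp83640_private */
		int hwts_tx_en;
		struct mii_ts mii_ts;	/* embedded, not pointed-to */
	};

	static int my_hwtstamp(struct mii_ts *ts, int cfg)
	{
		struct priv *p = container_of(ts, struct priv, mii_ts);

		p->hwts_tx_en = cfg;
		return 0;
	}

	int main(void)
	{
		struct priv p = { 0, { my_hwtstamp } };

		p.mii_ts.hwtstamp(&p.mii_ts, 1);
		printf("hwts_tx_en = %d\n", p.hwts_tx_en);
		return 0;
	}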
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 8a4b1d167ce2..fe9aa3ad52a7 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83822 PHY
+/* Driver for the Texas Instruments DP83822, DP83825 and DP83826 PHYs.
*
* Copyright (C) 2017 Texas Instruments Inc.
*/
@@ -15,7 +14,12 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
+#define DP83825S_PHY_ID 0x2000a140
#define DP83825I_PHY_ID 0x2000a150
+#define DP83825CM_PHY_ID 0x2000a160
+#define DP83825CS_PHY_ID 0x2000a170
+#define DP83826C_PHY_ID 0x2000a130
+#define DP83826NC_PHY_ID 0x2000a110
#define DP83822_DEVADDR 0x1f
@@ -319,12 +323,22 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
+ DP83822_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83822_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83822_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
+ DP83822_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
+ DP83822_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
+ { DP83826C_PHY_ID, 0xfffffff0 },
+ { DP83826NC_PHY_ID, 0xfffffff0 },
+ { DP83825S_PHY_ID, 0xfffffff0 },
+ { DP83825CM_PHY_ID, 0xfffffff0 },
+ { DP83825CS_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9cd9dcee4eb2..967f57ed0b65 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -93,10 +93,13 @@
#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
/* PHY CTRL bits */
-#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT 12
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
-#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_TX_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
@@ -131,7 +134,8 @@ enum {
struct dp83867_private {
u32 rx_id_delay;
u32 tx_id_delay;
- u32 fifo_depth;
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
@@ -408,18 +412,32 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
ret = of_property_read_u32(of_node, "ti,fifo-depth",
- &dp83867->fifo_depth);
+ &dp83867->tx_fifo_depth);
if (ret) {
- phydev_err(phydev,
- "ti,fifo-depth property is required\n");
- return ret;
+ ret = of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83867->tx_fifo_depth);
+ if (ret)
+ dp83867->tx_fifo_depth =
+ DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
}
- if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
- phydev_err(phydev,
- "ti,fifo-depth value %u out of range\n",
- dp83867->fifo_depth);
+
+ if (dp83867->tx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "tx-fifo-depth value %u out of range\n",
+ dp83867->tx_fifo_depth);
return -EINVAL;
}
+
+ ret = of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83867->rx_fifo_depth);
+ if (ret)
+ dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (dp83867->rx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "rx-fifo-depth value %u out of range\n",
+ dp83867->rx_fifo_depth);
+ return -EINVAL;
+ }
+
return 0;
}
#else
@@ -458,12 +476,31 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
BIT(7));
+ if (phy_interface_is_rgmii(phydev) ||
+ phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK;
+ val |= (dp83867->tx_fifo_depth <<
+ DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK;
+ val |= (dp83867->rx_fifo_depth <<
+ DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT);
+ }
+
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
- val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
- val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
/* The code below checks if "port mirroring" N/A MODE4 has been
* enabled during power on bootstrap.
@@ -599,7 +636,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
usleep_range(10, 20);
- return 0;
+ /* After reset, the FORCE_LINK_GOOD bit is set even though its
+ * default value should be unset. Clear FORCE_LINK_GOOD so the
+ * PHY works properly.
+ */
+ return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
static struct phy_driver dp83867_driver[] = {
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 93021904c5e4..7996a4aea8d2 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -334,7 +334,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
default:
return -EINVAL;
- };
+ }
return ret;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 7c5265fd2b94..4a3d34f40cb9 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -210,18 +210,15 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
* Linux device associated with it, we simply have to obtain
* the GPIO descriptor from the device tree like this.
*/
- gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
- GPIOD_IN, "mdio");
- of_node_put(fixed_link_node);
- if (IS_ERR(gpiod)) {
- if (PTR_ERR(gpiod) == -EPROBE_DEFER)
- return gpiod;
-
+ gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
+ "link", 0, GPIOD_IN, "mdio");
+ if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
if (PTR_ERR(gpiod) != -ENOENT)
pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
fixed_link_node);
gpiod = NULL;
}
+ of_node_put(fixed_link_node);
return gpiod;
}
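Besides switching to fwnode_gpiod_get_index(), the fixed_phy hunk moves of_node_put() after the last use of fixed_link_node: the fwnode handle passed to the GPIO lookup is derived from that node, so dropping the reference first would risk a use-after-free, and putting it once at the end also covers every return path. The general rule, sketched with a toy refcount:

	#include <stdio.h>

	struct node {
		int refs;
		const char *name;
	};

	static struct node *node_get(struct node *n) { n->refs++; return n; }

	static void node_put(struct node *n)
	{
		if (--n->refs == 0) {
			printf("freeing %s\n", n->name);
			/* a real implementation would free the node here */
		}
	}

	static void use_node(struct node *n)
	{
		printf("using %s (refs=%d)\n", n->name, n->refs);
	}

	int main(void)
	{
		struct node fixed_link = { 0, "fixed-link" };
		struct node *n = node_get(&fixed_link);

		/* Correct ordering: finish every use, then drop the ref. */
		use_node(n);
		node_put(n);
		return 0;
	}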
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 356bd6472f49..fec58ad69e02 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -190,27 +190,11 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
}
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
phydev->pause = phydev->asym_pause = 0;
linkmode_zero(phydev->lp_advertising);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b1fbd1937328..28e33ece4ce1 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -162,19 +162,9 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
-#define LPA_FIBER_1000HALF 0x40
-#define LPA_FIBER_1000FULL 0x20
-
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
-#define ADVERTISE_FIBER_1000HALF 0x40
-#define ADVERTISE_FIBER_1000FULL 0x20
-
-#define ADVERTISE_PAUSE_FIBER 0x180
-#define ADVERTISE_PAUSE_ASYM_FIBER 0x100
-
-#define REGISTER_LINK_STATUS 0x400
#define NB_FIBER_STATS 1
MODULE_DESCRIPTION("Marvell PHY driver");
@@ -497,16 +487,15 @@ static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
u32 result = 0;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
- result |= ADVERTISE_FIBER_1000HALF;
+ result |= ADVERTISE_1000XHALF;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
- result |= ADVERTISE_FIBER_1000FULL;
+ result |= ADVERTISE_1000XFULL;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= LPA_PAUSE_ASYM_FIBER;
+ result |= ADVERTISE_1000XPSE_ASYM;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= (ADVERTISE_PAUSE_FIBER
- & (~ADVERTISE_PAUSE_ASYM_FIBER));
+ result |= ADVERTISE_1000XPAUSE;
return result;
}
@@ -524,7 +513,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
{
int changed = 0;
int err;
- int adv, oldadv;
+ u16 adv;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
@@ -533,44 +522,19 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- /* Setup fiber advertisement */
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
- | LPA_PAUSE_FIBER);
- adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_ADVERTISE, adv);
- if (err < 0)
- return err;
+ adv = linkmode_adv_to_fiber_adv_t(phydev->advertising);
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XHALF | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM,
+ adv);
+ if (err < 0)
+ return err;
+ if (err > 0)
changed = 1;
- }
-
- if (changed == 0) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = 1; /* do restart aneg */
- }
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- if (changed > 0)
- changed = genphy_restart_aneg(phydev);
-
- return changed;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
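The rewritten marvell_config_aneg_fiber() leans on phy_modify_changed(), whose return value is tri-state: negative on a bus error, 0 when the register already held the desired bits, 1 when it actually wrote -- exactly the signal genphy_check_and_restart_aneg() needs to decide whether to restart autonegotiation. A minimal model of that contract:

	#include <stdio.h>

	static int reg;		/* stands in for the MII_ADVERTISE register */

	/* Same contract as phy_modify_changed(): <0 error, 0 no change,
	 * 1 register was rewritten.
	 */
	static int modify_changed(int mask, int set)
	{
		int new = (reg & ~mask) | set;

		if (new == reg)
			return 0;
		reg = new;
		return 1;
	}

	int main(void)
	{
		int changed = 0, err;

		reg = 0x01e0;
		err = modify_changed(0x01e0, 0x0020);	/* shrink advertisement */
		if (err < 0)
			return 1;
		if (err > 0)
			changed = 1;

		printf("reg=0x%04x, restart aneg: %s\n", reg,
		       changed ? "yes" : "no");
		return 0;
	}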
static int m88e1510_config_aneg(struct phy_device *phydev)
@@ -1302,93 +1266,29 @@ static int m88e6390_config_aneg(struct phy_device *phydev)
static void fiber_lpa_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- advertising, lpa & LPA_FIBER_1000HALF);
+ advertising, lpa & LPA_1000XHALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- advertising, lpa & LPA_FIBER_1000FULL);
-}
-
-/**
- * marvell_update_link - update link status in real time in @phydev
- * @phydev: target phy_device struct
- *
- * Description: Update the value in phydev->link to reflect the
- * current link value.
- */
-static int marvell_update_link(struct phy_device *phydev, int fiber)
-{
- int status;
-
- /* Use the generic register for copper link, or specific
- * register for fiber case
- */
- if (fiber) {
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- if ((status & REGISTER_LINK_STATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
- } else {
- return genphy_update_link(phydev);
- }
-
- return 0;
+ advertising, lpa & LPA_1000XFULL);
}
static int marvell_read_status_page_an(struct phy_device *phydev,
- int fiber)
+ int fiber, int status)
{
- int status;
int lpa;
- int lpagb;
-
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- lpa = phy_read(phydev, MII_LPA);
- if (lpa < 0)
- return lpa;
-
- lpagb = phy_read(phydev, MII_STAT1000);
- if (lpagb < 0)
- return lpagb;
-
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- status = status & MII_M1011_PHY_STATUS_SPD_MASK;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- switch (status) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
-
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
-
- default:
- phydev->speed = SPEED_10;
- break;
- }
+ int err;
if (!fiber) {
- mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb);
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
/* The fiber link is only 1000M capable */
fiber_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
@@ -1405,31 +1305,25 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
}
- return 0;
-}
-static int marvell_read_status_page_fixed(struct phy_device *phydev)
-{
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if (bmcr & BMCR_SPEED1000)
+ switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
+ case MII_M1011_PHY_STATUS_1000:
phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
+ break;
+
+ case MII_M1011_PHY_STATUS_100:
phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ break;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- linkmode_zero(phydev->lp_advertising);
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
return 0;
}
@@ -1444,25 +1338,38 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
*/
static int marvell_read_status_page(struct phy_device *phydev, int page)
{
+ int status;
int fiber;
int err;
- /* Detect and update the link, but return if there
- * was an error
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
+
+ /* Use the generic register for copper link status,
+ * and the PHY status register for fiber link status.
*/
+ if (page == MII_MARVELL_FIBER_PAGE) {
+ phydev->link = !!(status & MII_M1011_PHY_STATUS_LINK);
+ } else {
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+ }
+
if (page == MII_MARVELL_FIBER_PAGE)
fiber = 1;
else
fiber = 0;
- err = marvell_update_link(phydev, fiber);
- if (err)
- return err;
+ linkmode_zero(phydev->lp_advertising);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (phydev->autoneg == AUTONEG_ENABLE)
- err = marvell_read_status_page_an(phydev, fiber);
+ err = marvell_read_status_page_an(phydev, fiber, status);
else
- err = marvell_read_status_page_fixed(phydev);
+ err = genphy_read_status_fixed(phydev);
return err;
}
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1bf13017d288..64c9f3bba2cd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -214,9 +214,9 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phy_interface_t iface;
sfp_parse_support(phydev->sfp_bus, id, support);
- iface = sfp_select_interface(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
- if (iface != PHY_INTERFACE_MODE_10GKR) {
+ if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
return -EINVAL;
}
@@ -304,7 +304,7 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
return 0;
@@ -386,16 +386,17 @@ static void mv3310_update_interface(struct phy_device *phydev)
{
if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
+ phydev->link) {
/* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII, 10GBase-KR and
+ * active PHYXS instance) between Cisco SGMII, 10GBase-R and
* 2500BaseX modes according to the speed. Florian suggests
* setting phydev->interface to communicate this to the MAC.
* Only do this if we are already in one of the above modes.
*/
switch (phydev->speed) {
case SPEED_10000:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case SPEED_2500:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 0dce67672548..0746e2cc39ae 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -33,17 +33,24 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
- u8 data[2], dev_addr = reg;
+ u8 addr[3], data[2], *p;
int bus_addr, ret;
if (!i2c_mii_valid_phy_id(phy_id))
return 0xffff;
+ p = addr;
+ if (reg & MII_ADDR_C45) {
+ *p++ = 0x20 | ((reg >> 16) & 31);
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+
bus_addr = i2c_mii_phy_addr(phy_id);
msgs[0].addr = bus_addr;
msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &dev_addr;
+ msgs[0].len = p - addr;
+ msgs[0].buf = addr;
msgs[1].addr = bus_addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = sizeof(data);
@@ -61,18 +68,23 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
int ret;
- u8 data[3];
+ u8 data[5], *p;
if (!i2c_mii_valid_phy_id(phy_id))
return 0;
- data[0] = reg;
- data[1] = val >> 8;
- data[2] = val;
+ p = data;
+ if (reg & MII_ADDR_C45) {
+ *p++ = (reg >> 16) & 31;
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+ *p++ = val >> 8;
+ *p++ = val;
msg.addr = i2c_mii_phy_addr(phy_id);
msg.flags = 0;
- msg.len = 3;
+ msg.len = p - data;
msg.buf = data;
ret = i2c_transfer(i2c, &msg, 1);
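
For SFP modules, MDIO is tunnelled over I2C, and the pointer arithmetic above builds the frame for either access type. A summary of the layouts the two functions now produce (devad is (regnum >> 16) & 31 when MII_ADDR_C45 is set):

	/* Frame layouts, as built above:
	 *   C22 read, address phase:  { reg }
	 *   C45 read, address phase:  { 0x20 | devad, reg >> 8, reg }
	 *   C22 write:                { reg, val >> 8, val }
	 *   C45 write:                { devad, reg >> 8, reg, val >> 8, val }
	 */
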
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 229e480179ff..9bb9f37f21dc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -59,17 +59,11 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
static int mdiobus_register_reset(struct mdio_device *mdiodev)
{
- struct reset_control *reset = NULL;
-
- if (mdiodev->dev.of_node)
- reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
- "phy");
- if (IS_ERR(reset)) {
- if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else
- return PTR_ERR(reset);
- }
+ struct reset_control *reset;
+
+ reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
mdiodev->reset_ctrl = reset;
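
reset_control_get_optional_exclusive() returns NULL rather than -ENOENT when no "phy" reset is described for the device, which is why the open-coded -ENOENT/-ENOTSUPP filtering collapses into a plain IS_ERR() check. A minimal consumer-side sketch of the optional-reset pattern (names illustrative):

	/* Optional reset: rst is NULL when absent, and the
	 * reset_control_* calls are no-ops on a NULL handle.
	 */
	struct reset_control *rst;

	rst = reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	udelay(10);
	reset_control_deassert(rst);
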
@@ -164,9 +158,11 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
if (size)
bus->priv = (void *)bus + aligned_size;
- /* Initialise the interrupts to polling */
- for (i = 0; i < PHY_MAX_ADDR; i++)
+ /* Initialise the interrupts to polling and 64-bit seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
return bus;
}
@@ -255,9 +251,215 @@ static void mdiobus_release(struct device *d)
kfree(bus);
}
+struct mdio_bus_stat_attr {
+ int addr;
+ unsigned int field_offset;
+};
+
+static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset)
+{
+ const char *p = (const char *)s + offset;
+ unsigned int start;
+ u64 val = 0;
+
+ do {
+ start = u64_stats_fetch_begin(&s->syncp);
+ val = u64_stats_read((const u64_stats_t *)p);
+ } while (u64_stats_fetch_retry(&s->syncp, start));
+
+ return val;
+}
+
+static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset)
+{
+ unsigned int i;
+ u64 val = 0;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ val += mdio_bus_get_stat(&bus->stats[i], offset);
+
+ return val;
+}
+
+static ssize_t mdio_bus_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mii_bus *bus = to_mii_bus(dev);
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ if (sattr->addr < 0)
+ val = mdio_bus_get_global_stat(bus, sattr->field_offset);
+ else
+ val = mdio_bus_get_stat(&bus->stats[sattr->addr],
+ sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct mii_bus *bus = mdiodev->bus;
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ int addr = mdiodev->addr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}; \
+static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_device_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+};
+
+#define MDIO_BUS_STATS_ATTR(field) \
+ MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field))
+
+MDIO_BUS_STATS_ATTR(transfers);
+MDIO_BUS_STATS_ATTR(errors);
+MDIO_BUS_STATS_ATTR(writes);
+MDIO_BUS_STATS_ATTR(reads);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ addr, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}
+
+#define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \
+ MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \
+ __stringify(field) "_" __stringify(addr))
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \
+ MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(reads, addr) \
+
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \
+ &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_reads_##addr.attr.attr \
+
+static struct attribute *mdio_bus_statistics_attrs[] = {
+ &dev_attr_mdio_bus_transfers.attr.attr,
+ &dev_attr_mdio_bus_errors.attr.attr,
+ &dev_attr_mdio_bus_writes.attr.attr,
+ &dev_attr_mdio_bus_reads.attr.attr,
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(0),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(1),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(2),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(3),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(4),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(5),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(6),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(7),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(8),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(9),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(10),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(11),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(12),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(13),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(14),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(15),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(16),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(17),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(18),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(19),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(20),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(21),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(22),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(23),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(24),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(25),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(26),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(27),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(28),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(29),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(30),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(31),
+ NULL,
+};
+
+static const struct attribute_group mdio_bus_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_groups[] = {
+ &mdio_bus_statistics_group,
+ NULL,
+};
+
static struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
+ .dev_groups = mdio_bus_groups,
};
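
With dev_groups hooked into the mdio_bus class here (and into mdio_bus_type further down), the driver core creates the statistics directories automatically; no explicit sysfs_create_group() call is needed. Illustrative resulting paths for a bus named "fixed-0" with a device at address 1:

	/sys/class/mdio_bus/fixed-0/statistics/transfers
	/sys/class/mdio_bus/fixed-0/statistics/transfers_1
	/sys/bus/mdio_bus/devices/fixed-0:01/statistics/transfers
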
#if IS_ENABLED(CONFIG_OF_MDIO)
@@ -536,6 +738,24 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
}
EXPORT_SYMBOL(mdiobus_scan);
+static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
+{
+ u64_stats_update_begin(&stats->syncp);
+
+ u64_stats_inc(&stats->transfers);
+ if (ret < 0) {
+ u64_stats_inc(&stats->errors);
+ goto out;
+ }
+
+ if (op)
+ u64_stats_inc(&stats->reads);
+ else
+ u64_stats_inc(&stats->writes);
+out:
+ u64_stats_update_end(&stats->syncp);
+}
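
mdiobus_stats_acct() is the writer side of the u64_stats pattern: on 32-bit kernels the seqcount in syncp makes the two halves of each 64-bit counter appear atomic to the retry loop in mdio_bus_get_stat() above, while on 64-bit kernels both sides compile down to plain loads and stores. A self-contained sketch of the pairing, assuming a hypothetical counter struct:

	#include <linux/u64_stats_sync.h>

	struct foo_stats {
		u64_stats_t		packets;
		struct u64_stats_sync	syncp;	/* u64_stats_init()ed at setup */
	};

	static void foo_count(struct foo_stats *s)
	{
		u64_stats_update_begin(&s->syncp);	/* writer: bump seqcount */
		u64_stats_inc(&s->packets);
		u64_stats_update_end(&s->syncp);
	}

	static u64 foo_read(struct foo_stats *s)
	{
		unsigned int start;
		u64 val;

		do {	/* reader: retry if a writer raced with us */
			start = u64_stats_fetch_begin(&s->syncp);
			val = u64_stats_read(&s->packets);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		return val;
	}
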
+
/**
* __mdiobus_read - Unlocked version of the mdiobus_read function
* @bus: the mii_bus struct
@@ -555,6 +775,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
retval = bus->read(bus, addr, regnum);
trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+ mdiobus_stats_acct(&bus->stats[addr], true, retval);
return retval;
}
@@ -580,6 +801,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
err = bus->write(bus, addr, regnum, val);
trace_mdio_access(bus, 0, addr, regnum, val, err);
+ mdiobus_stats_acct(&bus->stats[addr], false, err);
return err;
}
@@ -725,8 +947,27 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static struct attribute *mdio_bus_device_statistics_attrs[] = {
+ &dev_attr_mdio_bus_device_transfers.attr.attr,
+ &dev_attr_mdio_bus_device_errors.attr.attr,
+ &dev_attr_mdio_bus_device_writes.attr.attr,
+ &dev_attr_mdio_bus_device_reads.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mdio_bus_device_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_device_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_dev_groups[] = {
+ &mdio_bus_device_statistics_group,
+ NULL,
+};
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
+ .dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
.uevent = mdio_uevent,
};
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
new file mode 100644
index 000000000000..2f12c5d901df
--- /dev/null
+++ b/drivers/net/phy/mii_timestamper.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Support for generic time stamping devices on MII buses.
+// Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+//
+
+#include <linux/mii_timestamper.h>
+
+static LIST_HEAD(mii_timestamping_devices);
+static DEFINE_MUTEX(tstamping_devices_lock);
+
+struct mii_timestamping_desc {
+ struct list_head list;
+ struct mii_timestamping_ctrl *ctrl;
+ struct device *device;
+};
+
+/**
+ * register_mii_tstamp_controller() - registers an MII time stamping device.
+ *
+ * @device: The device to be registered.
+ * @ctrl: Pointer to device's control interface.
+ *
+ * Returns zero on success or non-zero on failure.
+ */
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ struct mii_timestamping_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&desc->list);
+ desc->ctrl = ctrl;
+ desc->device = device;
+
+ mutex_lock(&tstamping_devices_lock);
+	list_add_tail(&desc->list, &mii_timestamping_devices);
+ mutex_unlock(&tstamping_devices_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(register_mii_tstamp_controller);
+
+/**
+ * unregister_mii_tstamp_controller() - unregisters an MII time stamping device.
+ *
+ * @device: A device previously passed to register_mii_tstamp_controller().
+ */
+void unregister_mii_tstamp_controller(struct device *device)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this, *next;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each_safe(this, next, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == device) {
+ list_del_init(&desc->list);
+ kfree(desc);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_tstamp_controller);
+
+/**
+ * register_mii_timestamper() - Enables a given port of an MII time stamper.
+ *
+ * @node: The device tree node of the MII time stamp controller.
+ * @port: The index of the port to be enabled.
+ *
+ * Returns a valid interface on success or ERR_PTR otherwise.
+ */
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ struct mii_timestamper *mii_ts = NULL;
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device->of_node == node) {
+ mii_ts = desc->ctrl->probe_channel(desc->device, port);
+ if (!IS_ERR(mii_ts)) {
+ mii_ts->device = desc->device;
+ get_device(desc->device);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+
+ return mii_ts ? mii_ts : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(register_mii_timestamper);
+
+/**
+ * unregister_mii_timestamper() - Disables a given MII time stamper.
+ *
+ * @mii_ts: An interface obtained via register_mii_timestamper().
+ */
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == mii_ts->device) {
+ desc->ctrl->release_channel(desc->device, mii_ts);
+ put_device(desc->device);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_timestamper);
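
A time stamping controller (typically a PTP device sitting on the MII lines between MAC and PHY) registers once and then hands out per-port mii_timestamper instances through its probe_channel() callback. A minimal sketch of the controller side; only the mii_timestamping_ctrl callbacks and the register/unregister calls are the real API, all foo_* names are hypothetical:

	static struct mii_timestamper *foo_probe_channel(struct device *dev,
							 unsigned int port)
	{
		struct foo_tstamp *foo = dev_get_drvdata(dev);

		if (port >= foo->nports)
			return ERR_PTR(-EINVAL);

		return &foo->chan[port].mii_ts;
	}

	static void foo_release_channel(struct device *dev,
					struct mii_timestamper *mii_ts)
	{
		/* nothing to undo in this sketch */
	}

	static struct mii_timestamping_ctrl foo_ctrl = {
		.probe_channel = foo_probe_channel,
		.release_channel = foo_release_channel,
	};

	/* in probe:  */ err = register_mii_tstamp_controller(dev, &foo_ctrl);
	/* in remove: */ unregister_mii_tstamp_controller(dev);
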
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index d5f8f351d9ef..937ac7da2789 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -18,6 +18,17 @@
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+#include <linux/scatterlist.h>
+#include <crypto/skcipher.h>
+
+#if IS_ENABLED(CONFIG_MACSEC)
+#include <net/macsec.h>
+#endif
+
+#include "mscc_macsec.h"
+#include "mscc_mac.h"
+#include "mscc_fc_buffer.h"
+
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
RGMII_RX_CLK_DELAY_0_8_NS = 1,
@@ -69,7 +80,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_MASK_MASK 0xa020
#define MII_VSC85XX_INT_MASK_WOL 0x0040
#define MII_VSC85XX_INT_STATUS 26
@@ -121,6 +132,26 @@ enum rgmii_rx_clock_delay {
#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
#define PHY_S6G_PLL_FSM_ENA_POS 7
+#define MSCC_EXT_PAGE_MACSEC_17 17
+#define MSCC_EXT_PAGE_MACSEC_18 18
+
+#define MSCC_EXT_PAGE_MACSEC_19 19
+#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
+#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_MACSEC_19_READ BIT(14)
+#define MSCC_PHY_MACSEC_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_MACSEC_20 20
+#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
+enum macsec_bank {
+ FC_BUFFER = 0x04,
+ HOST_MAC = 0x05,
+ LINE_MAC = 0x06,
+ IP_1588 = 0x0e,
+ MACSEC_INGR = 0x38,
+ MACSEC_EGR = 0x3c,
+};
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
@@ -128,6 +159,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
+#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
* in the same package.
*/
@@ -175,6 +207,9 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_ENABLE 0x8000
#define SECURE_ON_PASSWD_LEN_4 0x4000
+#define MSCC_PHY_EXTENDED_INT 28
+#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
+
/* Extended Page 3 Registers */
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
@@ -411,6 +446,44 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
+#if IS_ENABLED(CONFIG_MACSEC)
+struct macsec_flow {
+ struct list_head list;
+ enum mscc_macsec_destination_ports port;
+ enum macsec_bank bank;
+ u32 index;
+ int assoc_num;
+ bool has_transformation;
+
+ /* Highest takes precedence [0..15] */
+ u8 priority;
+
+ u8 key[MACSEC_KEYID_LEN];
+
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+
+ /* Matching */
+ struct {
+ u8 sci:1;
+ u8 tagged:1;
+ u8 untagged:1;
+ u8 etype:1;
+ } match;
+
+ u16 etype;
+
+ /* Action */
+ struct {
+ u8 bypass:1;
+ u8 drop:1;
+ } action;
+};
+#endif
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -424,6 +497,19 @@ struct vsc8531_private {
* package.
*/
unsigned int base_addr;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec fields:
+ * - One SecY per device (enforced at the s/w implementation level)
+ * - macsec_flows: list of h/w flows
+ * - ingr_flows: bitmap of ingress flows
+ * - egr_flows: bitmap of egress flows
+ */
+ struct macsec_secy *secy;
+ struct list_head macsec_flows;
+ unsigned long ingr_flows;
+ unsigned long egr_flows;
+#endif
};
#ifdef CONFIG_OF_MDIO
@@ -1584,6 +1670,978 @@ out:
return ret;
}
+#if IS_ENABLED(CONFIG_MACSEC)
+static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg)
+{
+ u32 val, val_l = 0, val_h = 0;
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if (bank >> 2 == 0x1)
+ /* non-MACsec access */
+ bank &= 0x3;
+ else
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
+ MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+ val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
+ val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+
+ return (val_h << 16) | val_l;
+}
+
+static void vsc8584_macsec_phy_write(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg, u32 val)
+{
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
+ bank &= 0x3;
+ else
+ /* MACsec access */
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+}
+
+static void vsc8584_macsec_classification(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ /* enable VLAN tag parsing */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
+ MSCC_MS_SAM_CP_TAG_PARSE_STAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
+ enum macsec_bank bank,
+ bool block)
+{
+ u32 port = (bank == MACSEC_INGR) ?
+ MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
+ u32 action = MSCC_MS_FLOW_BYPASS;
+
+ if (block)
+ action = MSCC_MS_FLOW_DROP;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
+}
+
+static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+
+ if (bank != MACSEC_INGR)
+ return;
+
+ /* Set default rules to pass unmatched frames */
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MS_PARAMS2_IG_CC_CONTROL);
+ val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
+ MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
+ val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_block_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_SW_RST |
+ MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
+
+	/* Take the MACsec block out of s/w reset and enable clocks */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
+ bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
+ MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
+ MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
+
+ /* Clear the counters */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Enable octet increment mode */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
+ MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Set the MTU */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
+
+ for (i = 0; i < 8; i++)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
+
+ if (bank == MACSEC_EGR) {
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
+ val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
+ MSCC_MS_FC_CFG_FCBUF_ENA |
+ MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
+ MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
+ MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
+ MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
+ }
+
+ vsc8584_macsec_classification(phydev, bank);
+ vsc8584_macsec_flow_default_action(phydev, bank, false);
+ vsc8584_macsec_integrity_checks(phydev, bank);
+
+ /* Enable the MACsec block */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
+}
+
+static void vsc8584_macsec_mac_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ /* Clear host & line stats */
+ for (i = 0; i < 36; i++)
+ vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
+ val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
+ val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
+ val |= 0xffff;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
+ if (bank == HOST_MAC)
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
+ else
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
+ (bank == HOST_MAC ?
+ MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
+ val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
+ val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
+ val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
+ MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
+ val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
+ MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_RX_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_ENA);
+}
+
+/* Must be called with mdio_lock taken */
+static int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ u32 val;
+
+ vsc8584_macsec_block_init(phydev, MACSEC_INGR);
+ vsc8584_macsec_block_init(phydev, MACSEC_EGR);
+ vsc8584_macsec_mac_init(phydev, HOST_MAC);
+ vsc8584_macsec_mac_init(phydev, LINE_MAC);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_FC_READ_THRESH_CFG,
+ MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
+ MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
+ val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
+ MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
+ MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG);
+ val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
+ val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
+ val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+
+ return 0;
+}
+
+static void vsc8584_macsec_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
+
+ if (flow->match.tagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
+ if (flow->match.untagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
+
+ if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
+ match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
+ mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
+ }
+
+ if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+ match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
+ mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
+ MSCC_MS_SAM_MASK_SCI_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
+ lower_32_bits(flow->rx_sa->sc->sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
+ upper_32_bits(flow->rx_sa->sc->sci));
+ }
+
+ if (flow->match.etype) {
+ mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
+ MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+ }
+
+ match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
+
+ /* Action for matching packets */
+ if (flow->action.drop)
+ action = MSCC_MS_FLOW_DROP;
+ else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
+ action = MSCC_MS_FLOW_BYPASS;
+ else
+ action = (bank == MACSEC_INGR) ?
+ MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
+
+ val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
+ MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
+
+ if (action == MSCC_MS_FLOW_BYPASS)
+ goto write_ctrl;
+
+ if (bank == MACSEC_INGR) {
+ if (priv->secy->replay_protect)
+ val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
+ if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
+ else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
+ } else if (bank == MACSEC_EGR) {
+ if (priv->secy->protect_frames)
+ val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
+ if (priv->secy->tx_sc.encrypt)
+ val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
+ if (priv->secy->tx_sc.send_sci)
+ val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
+ }
+
+write_ctrl:
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
+ enum macsec_bank bank)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
+ if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
+ (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
+ return;
+
+ /* Enable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
+
+ /* Set in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ /* Disable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
+
+ /* Clear in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+{
+ if (flow->bank == MACSEC_INGR)
+ return flow->index + MSCC_MS_MAX_FLOWS;
+
+ return flow->index;
+}
+
+/* Derive a key for hash authentication from the AES key */
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+ u16 key_len, u8 hkey[16])
+{
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ struct skcipher_request *req = NULL;
+ struct scatterlist src, dst;
+ DECLARE_CRYPTO_WAIT(wait);
+ u32 input[4] = {0};
+ int ret;
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
+ &wait);
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0)
+ goto out;
+
+ sg_init_one(&src, input, 16);
+ sg_init_one(&dst, hkey, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+
+ ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return ret;
+}
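
The derived value is the AES-GCM hash subkey: GHASH needs H = CIPH_K(0^128) (NIST SP 800-38D), i.e. the encryption of an all-zero block under the MACsec key, and a single-block "ecb(aes)" request computes exactly that, since one-block ECB is the raw cipher:

	H = AES_K(0^128)	/* input[] above is the all-zero block */
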
+
+static int vsc8584_macsec_transformation(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ int i, ret, index = flow->index;
+ u32 rec = 0, control = 0;
+ u8 hkey[16];
+ sci_t sci;
+
+ ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ if (ret)
+ return ret;
+
+ switch (priv->secy->key_len) {
+ case 16:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
+ break;
+ case 32:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ control |= (bank == MACSEC_EGR) ?
+ (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
+ (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
+
+ control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
+ CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
+ CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
+ CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
+
+ /* Set the control word */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ control);
+
+ /* Set the context ID. Must be unique. */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ vsc8584_macsec_flow_context_id(flow));
+
+ /* Set the encryption/decryption key */
+ for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)flow->key)[i]);
+
+ /* Set the authentication key */
+ for (i = 0; i < 4; i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)hkey)[i]);
+
+ /* Initial sequence number */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ bank == MACSEC_INGR ?
+ flow->rx_sa->next_pn : flow->tx_sa->next_pn);
+
+ if (bank == MACSEC_INGR)
+ /* Set the mask (replay window size) */
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ priv->secy->replay_window);
+
+ /* Set the input vectors */
+ sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ lower_32_bits(sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ upper_32_bits(sci));
+
+ while (rec < 20)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ 0);
+
+ flow->has_transformation = true;
+ return 0;
+}
+
+static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
+ enum macsec_bank bank)
+{
+ unsigned long *bitmap = bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+ struct macsec_flow *flow;
+ int index;
+
+ index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
+
+ if (index == MSCC_MS_MAX_FLOWS)
+ return ERR_PTR(-ENOMEM);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(index, bitmap);
+ flow->index = index;
+ flow->bank = bank;
+ flow->priority = 8;
+ flow->assoc_num = -1;
+
+ list_add_tail(&flow->list, &priv->macsec_flows);
+ return flow;
+}
+
+static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
+ struct macsec_flow *flow)
+{
+ unsigned long *bitmap = flow->bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+
+ list_del(&flow->list);
+ clear_bit(flow->index, bitmap);
+ kfree(flow);
+}
+
+static int vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow, bool update)
+{
+ int ret;
+
+ flow->port = MSCC_MS_PORT_CONTROLLED;
+ vsc8584_macsec_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow);
+ if (ret) {
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_default_flows(struct phy_device *phydev)
+{
+ struct macsec_flow *flow;
+
+ /* Add a rule to let the MKA traffic go through, ingress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_UNCONTROLLED;
+ flow->match.tagged = 1;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ /* Add a rule to let the MKA traffic go through, egress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_COMMON;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ return 0;
+}
+
+static void vsc8584_macsec_del_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ vsc8584_macsec_flow_disable(phydev, flow);
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+}
+
+static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->rx_sa = ctx->sa.rx_sa;
+
+ /* Always match tagged packets on ingress */
+ flow->match.tagged = 1;
+ flow->match.sci = 1;
+
+ if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->tx_sa = ctx->sa.tx_sa;
+
+ /* Always match untagged packets on egress */
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+
+ return 0;
+}
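
All of the mdo_* callbacks here follow the two-phase contract of the MACsec offload API: the core invokes each operation first with ctx->prepare set (a dry run that may fail but must not commit state) and then a second time to commit, which is why nearly every handler opens with a prepare-only branch or early return. Sketched from the caller's side (call shape only):

	/* Two-phase offload call as issued by the MACsec core (sketch) */
	ctx.prepare = true;
	ret = ops->mdo_add_rxsa(&ctx);		/* phase 1: validate */
	if (ret)
		return ret;

	ctx.prepare = false;
	ret = ops->mdo_add_rxsa(&ctx);		/* phase 2: commit */
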
+
+static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_secy *secy = ctx->secy;
+
+ if (ctx->prepare) {
+ if (priv->secy)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ priv->secy = secy;
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+
+ return vsc8584_macsec_default_flows(ctx->phydev);
+}
+
+static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
+
+ priv->secy = NULL;
+ return 0;
+}
+
+static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
+{
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_secy(ctx);
+ return vsc8584_macsec_add_secy(ctx);
+}
+
+static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ if (flow->bank == MACSEC_INGR && flow->rx_sa &&
+ flow->rx_sa->sc->sci == ctx->rx_sc->sci)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_txsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_txsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static struct macsec_ops vsc8584_macsec_ops = {
+ .mdo_dev_open = vsc8584_macsec_dev_open,
+ .mdo_dev_stop = vsc8584_macsec_dev_stop,
+ .mdo_add_secy = vsc8584_macsec_add_secy,
+ .mdo_upd_secy = vsc8584_macsec_upd_secy,
+ .mdo_del_secy = vsc8584_macsec_del_secy,
+ .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
+ .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
+ .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
+ .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
+ .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
+ .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
+ .mdo_add_txsa = vsc8584_macsec_add_txsa,
+ .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
+ .mdo_del_txsa = vsc8584_macsec_del_txsa,
+};
+#endif /* CONFIG_MACSEC */
+
/* Check if one PHY has already done the init of the parts common to all PHYs
* in the Quad PHY package.
*/
@@ -1733,6 +2791,24 @@ static int vsc8584_config_init(struct phy_device *phydev)
mutex_unlock(&phydev->mdio.bus->mdio_lock);
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec */
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ INIT_LIST_HEAD(&vsc8531->macsec_flows);
+ vsc8531->secy = NULL;
+
+ phydev->macsec_ops = &vsc8584_macsec_ops;
+
+ ret = vsc8584_macsec_init(phydev);
+ if (ret)
+ goto err;
+ }
+#endif
+
phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
@@ -1758,6 +2834,43 @@ err:
return ret;
}
+static int vsc8584_handle_interrupt(struct phy_device *phydev)
+{
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow, *tmp;
+ u32 cause, rec;
+
+ /* Check MACsec PN rollover */
+ cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS);
+ cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
+ if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
+ goto skip_rollover;
+
+ rec = 6 + priv->secy->key_len / sizeof(u32);
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ u32 val;
+
+ if (flow->bank != MACSEC_EGR || !flow->has_transformation)
+ continue;
+
+ val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_XFORM_REC(flow->index, rec));
+ if (val == 0xffffffff) {
+ vsc8584_macsec_flow_disable(phydev, flow);
+ macsec_pn_wrapped(priv->secy, flow->tx_sa);
+ break;
+ }
+ }
+
+skip_rollover:
+#endif
+
+ phy_mac_interrupt(phydev);
+ return 0;
+}
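
The record offset used above falls out of the transform layout written by vsc8584_macsec_transformation(): one control word, one context ID, key_len/4 key words and four hash-key words precede the sequence number, so

	rec = 1 (control) + 1 (context ID) + key_len/4 (key) + 4 (hkey)
	    = 6 + key_len/4		/* word 10 for AES-128, 14 for AES-256 */

and a saturated 0xffffffff in that word identifies the egress SA whose packet number wrapped.
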
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i, phy_id;
@@ -2201,6 +3314,20 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+#if IS_ENABLED(CONFIG_MACSEC)
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_2);
+ phy_write(phydev, MSCC_PHY_EXTENDED_INT,
+ MSCC_PHY_EXTENDED_INT_MS_EGR);
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_AIC_CTRL, 0xf);
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS,
+ MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
+#endif
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
@@ -2404,7 +3531,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2429,7 +3555,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2454,7 +3579,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2479,7 +3603,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2504,7 +3627,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2530,7 +3652,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2556,6 +3677,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2608,6 +3730,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2632,6 +3755,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2656,6 +3780,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc_fc_buffer.h
new file mode 100644
index 000000000000..7e9c0e877895
--- /dev/null
+++ b/drivers/net/phy/mscc_fc_buffer.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (C) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_FC_BUFFER_H_
+#define _MSCC_OCELOT_FC_BUFFER_H_
+
+#define MSCC_FCBUF_ENA_CFG 0x00
+#define MSCC_FCBUF_MODE_CFG 0x01
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG 0x02
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG 0x03
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG 0x04
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG 0x05
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG 0x06
+#define MSCC_FCBUF_FC_READ_THRESH_CFG 0x07
+#define MSCC_FCBUF_TX_FRM_GAP_COMP 0x08
+
+#define MSCC_FCBUF_ENA_CFG_TX_ENA BIT(0)
+#define MSCC_FCBUF_ENA_CFG_RX_ENA BIT(4)
+
+#define MSCC_FCBUF_MODE_CFG_DROP_BEHAVIOUR BIT(4)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_REACT_ENA BIT(8)
+#define MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA BIT(12)
+#define MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA BIT(16)
+#define MSCC_FCBUF_MODE_CFG_TX_CTRL_QUEUE_ENA BIT(20)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA BIT(24)
+#define MSCC_FCBUF_MODE_CFG_INCLUDE_PAUSE_RCVD_IN_PAUSE_GEN BIT(28)
+
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(x) ((x) << 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET_M GENMASK(19, 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH(x) ((x) << 20)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH_M GENMASK(31, 20)
+
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH(x) (x)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M GENMASK(31, 16)
+
+#endif
diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc_mac.h
new file mode 100644
index 000000000000..9420ee5175a6
--- /dev/null
+++ b/drivers/net/phy/mscc_mac.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_LINE_MAC_H_
+#define _MSCC_OCELOT_LINE_MAC_H_
+
+#define MSCC_MAC_CFG_ENA_CFG 0x00
+#define MSCC_MAC_CFG_MODE_CFG 0x01
+#define MSCC_MAC_CFG_MAXLEN_CFG 0x02
+#define MSCC_MAC_CFG_NUM_TAGS_CFG 0x03
+#define MSCC_MAC_CFG_TAGS_CFG 0x04
+#define MSCC_MAC_CFG_ADV_CHK_CFG 0x07
+#define MSCC_MAC_CFG_LFS_CFG 0x08
+#define MSCC_MAC_CFG_LB_CFG 0x09
+#define MSCC_MAC_CFG_PKTINF_CFG 0x0a
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL 0x0b
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2 0x0c
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL 0x0d
+#define MSCC_MAC_PAUSE_CFG_STATE 0x0e
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_LSB 0x0f
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_MSB 0x10
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_0 0x11
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_1 0x12
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY 0x13
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY_MASK 0x14
+#define MSCC_MAC_STATUS_STICKY 0x15
+#define MSCC_MAC_STATUS_STICKY_MASK 0x16
+#define MSCC_MAC_STATS_32BIT_RX_HIH_CKSM_ERR_CNT 0x17
+#define MSCC_MAC_STATS_32BIT_RX_XGMII_PROT_ERR_CNT 0x18
+#define MSCC_MAC_STATS_32BIT_RX_SYMBOL_ERR_CNT 0x19
+#define MSCC_MAC_STATS_32BIT_RX_PAUSE_CNT 0x1a
+#define MSCC_MAC_STATS_32BIT_RX_UNSUP_OPCODE_CNT 0x1b
+#define MSCC_MAC_STATS_32BIT_RX_UC_CNT 0x1c
+#define MSCC_MAC_STATS_32BIT_RX_MC_CNT 0x1d
+#define MSCC_MAC_STATS_32BIT_RX_BC_CNT 0x1e
+#define MSCC_MAC_STATS_32BIT_RX_CRC_ERR_CNT 0x1f
+#define MSCC_MAC_STATS_32BIT_RX_UNDERSIZE_CNT 0x20
+#define MSCC_MAC_STATS_32BIT_RX_FRAGMENTS_CNT 0x21
+#define MSCC_MAC_STATS_32BIT_RX_IN_RANGE_LEN_ERR_CNT 0x22
+#define MSCC_MAC_STATS_32BIT_RX_OUT_OF_RANGE_LEN_ERR_CNT 0x23
+#define MSCC_MAC_STATS_32BIT_RX_OVERSIZE_CNT 0x24
+#define MSCC_MAC_STATS_32BIT_RX_JABBERS_CNT 0x25
+#define MSCC_MAC_STATS_32BIT_RX_SIZE64_CNT 0x26
+#define MSCC_MAC_STATS_32BIT_RX_SIZE65TO127_CNT 0x27
+#define MSCC_MAC_STATS_32BIT_RX_SIZE128TO255_CNT 0x28
+#define MSCC_MAC_STATS_32BIT_RX_SIZE256TO511_CNT 0x29
+#define MSCC_MAC_STATS_32BIT_RX_SIZE512TO1023_CNT 0x2a
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1024TO1518_CNT 0x2b
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1519TOMAX_CNT 0x2c
+#define MSCC_MAC_STATS_32BIT_RX_IPG_SHRINK_CNT 0x2d
+#define MSCC_MAC_STATS_32BIT_TX_PAUSE_CNT 0x2e
+#define MSCC_MAC_STATS_32BIT_TX_UC_CNT 0x2f
+#define MSCC_MAC_STATS_32BIT_TX_MC_CNT 0x30
+#define MSCC_MAC_STATS_32BIT_TX_BC_CNT 0x31
+#define MSCC_MAC_STATS_32BIT_TX_SIZE64_CNT 0x32
+#define MSCC_MAC_STATS_32BIT_TX_SIZE65TO127_CNT 0x33
+#define MSCC_MAC_STATS_32BIT_TX_SIZE128TO255_CNT 0x34
+#define MSCC_MAC_STATS_32BIT_TX_SIZE256TO511_CNT 0x35
+#define MSCC_MAC_STATS_32BIT_TX_SIZE512TO1023_CNT 0x36
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1024TO1518_CNT 0x37
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1519TOMAX_CNT 0x38
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_CNT 0x39
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_MSB_CNT 0x3a
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_CNT 0x3b
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_MSB_CNT 0x3c
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_CNT 0x3d
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_MSB_CNT 0x3e
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_CNT 0x3f
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_MSB_CNT 0x40
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_CNT 0x41
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_MSB_CNT 0x42
+
+#define MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA BIT(0)
+#define MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA BIT(4)
+#define MSCC_MAC_CFG_ENA_CFG_RX_SW_RST BIT(8)
+#define MSCC_MAC_CFG_ENA_CFG_TX_SW_RST BIT(12)
+#define MSCC_MAC_CFG_ENA_CFG_RX_ENA BIT(16)
+#define MSCC_MAC_CFG_ENA_CFG_TX_ENA BIT(20)
+
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL(x) ((x) << 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL_M GENMASK(29, 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE BIT(16)
+#define MSCC_MAC_CFG_MODE_CFG_TUNNEL_PAUSE_FRAMES BIT(14)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG(x) ((x) << 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG_M GENMASK(12, 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_IPG_CFG BIT(6)
+#define MSCC_MAC_CFG_MODE_CFG_XGMII_GEN_MODE_ENA BIT(4)
+#define MSCC_MAC_CFG_MODE_CFG_HIH_CRC_CHECK BIT(2)
+#define MSCC_MAC_CFG_MODE_CFG_UNDERSIZED_FRAME_DROP_DIS BIT(1)
+#define MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC BIT(0)
+
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(x) (x)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M GENMASK(15, 0)
+
+#define MSCC_MAC_CFG_TAGS_CFG_RSZ 0x4
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID(x) ((x) << 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ENA BIT(4)
+
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LFS_CFG_LFS_INH_TX BIT(8)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_DIS_TX BIT(4)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_UNIDIR_ENA BIT(3)
+#define MSCC_MAC_CFG_LFS_CFG_USE_LEADING_EDGE_DETECT BIT(2)
+#define MSCC_MAC_CFG_LFS_CFG_SPURIOUS_Q_DIS BIT(1)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LB_CFG_XGMII_HOST_LB_ENA BIT(4)
+#define MSCC_MAC_CFG_LB_CFG_XGMII_PHY_LB_ENA BIT(0)
+
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA BIT(0)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA BIT(4)
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA BIT(8)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA BIT(12)
+#define MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA BIT(16)
+#define MSCC_MAC_CFG_PKTINF_CFG_LF_RELAY_ENA BIT(20)
+#define MSCC_MAC_CFG_PKTINF_CFG_RF_RELAY_ENA BIT(24)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING BIT(25)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_RX_PADDING BIT(26)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_4BYTE_PREAMBLE BIT(27)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(x) ((x) << 28)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS_M GENMASK(30, 28)
+
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(x) ((x) << 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE_M GENMASK(31, 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_WAIT_FOR_LPI_LOW BIT(12)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_USE_PAUSE_STALL_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_REPL_MODE BIT(4)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_FRC_FRAME BIT(2)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(x) (x)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M GENMASK(1, 0)
+
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA BIT(16)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PRE_CRC_MODE BIT(20)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA BIT(12)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA BIT(4)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE BIT(0)
+
+#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0)
+#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4)
+
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
+
+#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc_macsec.h
new file mode 100644
index 000000000000..d9ab6aba7482
--- /dev/null
+++ b/drivers/net/phy/mscc_macsec.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_MACSEC_H_
+#define _MSCC_OCELOT_MACSEC_H_
+
+#define MSCC_MS_MAX_FLOWS 16
+
+#define CONTROL_TYPE_EGRESS 0x6
+#define CONTROL_TYPE_INGRESS 0xf
+#define CONTROL_IV0 BIT(5)
+#define CONTROL_IV1 BIT(6)
+#define CONTROL_IV2 BIT(7)
+#define CONTROL_UPDATE_SEQ BIT(13)
+#define CONTROL_IV_IN_SEQ BIT(14)
+#define CONTROL_ENCRYPT_AUTH BIT(15)
+#define CONTROL_KEY_IN_CTX BIT(16)
+#define CONTROL_CRYPTO_ALG(x) ((x) << 17)
+#define CRYPTO_ALG_AES_CTR_128 0x5
+#define CRYPTO_ALG_AES_CTR_192 0x6
+#define CRYPTO_ALG_AES_CTR_256 0x7
+#define CONTROL_DIGEST_TYPE(x) ((x) << 21)
+#define CONTROL_AUTH_ALG(x) ((x) << 23)
+#define AUTH_ALG_AES_GHASH 0x4
+#define CONTROL_AN(x) ((x) << 26)
+#define CONTROL_SEQ_TYPE(x) ((x) << 28)
+#define CONTROL_SEQ_MASK BIT(30)
+#define CONTROL_CONTEXT_ID BIT(31)
+
+enum mscc_macsec_destination_ports {
+ MSCC_MS_PORT_COMMON = 0,
+ MSCC_MS_PORT_RSVD = 1,
+ MSCC_MS_PORT_CONTROLLED = 2,
+ MSCC_MS_PORT_UNCONTROLLED = 3,
+};
+
+enum mscc_macsec_drop_actions {
+ MSCC_MS_ACTION_BYPASS_CRC = 0,
+ MSCC_MS_ACTION_BYPASS_BAD = 1,
+ MSCC_MS_ACTION_DROP = 2,
+ MSCC_MS_ACTION_BYPASS = 3,
+};
+
+enum mscc_macsec_flow_types {
+ MSCC_MS_FLOW_BYPASS = 0,
+ MSCC_MS_FLOW_DROP = 1,
+ MSCC_MS_FLOW_INGRESS = 2,
+ MSCC_MS_FLOW_EGRESS = 3,
+};
+
+enum mscc_macsec_validate_levels {
+ MSCC_MS_VALIDATE_DISABLED = 0,
+ MSCC_MS_VALIDATE_CHECK = 1,
+ MSCC_MS_VALIDATE_STRICT = 2,
+};
+
+#define MSCC_MS_XFORM_REC(x, y) (((x) << 5) + (y))
+#define MSCC_MS_ENA_CFG 0x800
+#define MSCC_MS_FC_CFG 0x804
+#define MSCC_MS_SAM_MAC_SA_MATCH_LO(x) (0x1000 + ((x) << 4))
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI(x) (0x1001 + ((x) << 4))
+#define MSCC_MS_SAM_MISC_MATCH(x) (0x1004 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_LO(x) (0x1005 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_HI(x) (0x1006 + ((x) << 4))
+#define MSCC_MS_SAM_MASK(x) (0x1007 + ((x) << 4))
+#define MSCC_MS_SAM_ENTRY_SET1 0x1808
+#define MSCC_MS_SAM_ENTRY_CLEAR1 0x180c
+#define MSCC_MS_SAM_FLOW_CTRL(x) (0x1c00 + (x))
+#define MSCC_MS_SAM_CP_TAG 0x1e40
+#define MSCC_MS_SAM_NM_FLOW_NCP 0x1e51
+#define MSCC_MS_SAM_NM_FLOW_CP 0x1e52
+#define MSCC_MS_MISC_CONTROL 0x1e5f
+#define MSCC_MS_COUNT_CONTROL 0x3204
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL 0x3a10
+#define MSCC_MS_PARAMS2_IG_CP_TAG 0x3a14
+#define MSCC_MS_VLAN_MTU_CHECK(x) (0x3c40 + (x))
+#define MSCC_MS_NON_VLAN_MTU_CHECK 0x3c48
+#define MSCC_MS_PP_CTRL 0x3c4b
+#define MSCC_MS_STATUS_CONTEXT_CTRL 0x3d02
+#define MSCC_MS_INTR_CTRL_STATUS 0x3d04
+#define MSCC_MS_BLOCK_CTX_UPDATE 0x3d0c
+#define MSCC_MS_AIC_CTRL 0x3e02
+
+/* MACSEC_ENA_CFG */
+#define MSCC_MS_ENA_CFG_CLK_ENA BIT(0)
+#define MSCC_MS_ENA_CFG_SW_RST BIT(1)
+#define MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA BIT(8)
+#define MSCC_MS_ENA_CFG_MACSEC_ENA BIT(9)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(x) ((x) << 10)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE_M GENMASK(12, 10)
+
+/* MACSEC_FC_CFG */
+#define MSCC_MS_FC_CFG_FCBUF_ENA BIT(0)
+#define MSCC_MS_FC_CFG_USE_PKT_EXPANSION_INDICATION BIT(1)
+#define MSCC_MS_FC_CFG_LOW_THRESH(x) ((x) << 4)
+#define MSCC_MS_FC_CFG_LOW_THRESH_M GENMASK(7, 4)
+#define MSCC_MS_FC_CFG_HIGH_THRESH(x) ((x) << 8)
+#define MSCC_MS_FC_CFG_HIGH_THRESH_M GENMASK(11, 8)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL(x) ((x) << 12)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL_M GENMASK(14, 12)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL(x) ((x) << 16)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL_M GENMASK(18, 16)
+
+/* MSCC_MS_SAM_MAC_SA_MATCH_HI */
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE_M GENMASK(31, 16)
+
+/* MACSEC_SAM_MISC_MATCH */
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_VALID BIT(0)
+#define MSCC_MS_SAM_MISC_MATCH_QINQ_FOUND BIT(1)
+#define MSCC_MS_SAM_MISC_MATCH_STAG_VALID BIT(2)
+#define MSCC_MS_SAM_MISC_MATCH_QTAG_VALID BIT(3)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP(x) ((x) << 4)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP_M GENMASK(6, 4)
+#define MSCC_MS_SAM_MISC_MATCH_CONTROL_PACKET BIT(7)
+#define MSCC_MS_SAM_MISC_MATCH_UNTAGGED BIT(8)
+#define MSCC_MS_SAM_MISC_MATCH_TAGGED BIT(9)
+#define MSCC_MS_SAM_MISC_MATCH_BAD_TAG BIT(10)
+#define MSCC_MS_SAM_MISC_MATCH_KAY_TAG BIT(11)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT(x) ((x) << 12)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT_M GENMASK(13, 12)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY(x) ((x) << 16)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY_M GENMASK(19, 16)
+#define MSCC_MS_SAM_MISC_MATCH_AN(x) ((x) << 24)
+#define MSCC_MS_SAM_MISC_MATCH_TCI(x) ((x) << 26)
+
+/* MACSEC_SAM_MASK */
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK(x) (x)
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK_M GENMASK(5, 0)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK(x) ((x) << 6)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK_M GENMASK(11, 6)
+#define MSCC_MS_SAM_MASK_MAC_ETYPE_MASK BIT(12)
+#define MSCC_MS_SAM_MASK_VLAN_VLD_MASK BIT(13)
+#define MSCC_MS_SAM_MASK_QINQ_FOUND_MASK BIT(14)
+#define MSCC_MS_SAM_MASK_STAG_VLD_MASK BIT(15)
+#define MSCC_MS_SAM_MASK_QTAG_VLD_MASK BIT(16)
+#define MSCC_MS_SAM_MASK_VLAN_UP_MASK BIT(17)
+#define MSCC_MS_SAM_MASK_VLAN_ID_MASK BIT(18)
+#define MSCC_MS_SAM_MASK_SOURCE_PORT_MASK BIT(19)
+#define MSCC_MS_SAM_MASK_CTL_PACKET_MASK BIT(20)
+#define MSCC_MS_SAM_MASK_VLAN_UP_INNER_MASK BIT(21)
+#define MSCC_MS_SAM_MASK_VLAN_ID_INNER_MASK BIT(22)
+#define MSCC_MS_SAM_MASK_SCI_MASK BIT(23)
+#define MSCC_MS_SAM_MASK_AN_MASK(x) ((x) << 24)
+#define MSCC_MS_SAM_MASK_TCI_MASK(x) ((x) << 26)
+
+/* MACSEC_SAM_FLOW_CTRL_EGR */
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE_M GENMASK(1, 0)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT_M GENMASK(3, 2)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_4 BIT(4)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_CRYPT_AUTH BIT(5)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION_M GENMASK(7, 6)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8(x) ((x) << 8)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8_M GENMASK(15, 8)
+#define MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE BIT(17)
+#define MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI BIT(18)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_ES BIT(19)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_SCB BIT(20)
+#define MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(x) ((x) << 19)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE(x) ((x) << 21)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE_M GENMASK(22, 21)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_23 BIT(23)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET(x) ((x) << 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET_M GENMASK(30, 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT BIT(31)
+
+/* MACSEC_SAM_CP_TAG */
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_SAM_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_SAM_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_SAM_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_SAM_NM_FLOW_NCP */
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_SAM_NM_FLOW_CP */
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_MISC_CONTROL */
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(x) (x)
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX_M GENMASK(5, 0)
+#define MSCC_MS_MISC_CONTROL_STATIC_BYPASS BIT(8)
+#define MSCC_MS_MISC_CONTROL_NM_MACSEC_EN BIT(9)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES(x) ((x) << 10)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES_M GENMASK(11, 10)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(x) ((x) << 24)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE_M GENMASK(25, 24)
+
+/* MACSEC_COUNT_CONTROL */
+#define MSCC_MS_COUNT_CONTROL_RESET_ALL BIT(0)
+#define MSCC_MS_COUNT_CONTROL_DEBUG_ACCESS BIT(1)
+#define MSCC_MS_COUNT_CONTROL_SATURATE_CNTRS BIT(2)
+#define MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET BIT(3)
+
+/* MACSEC_PARAMS2_IG_CC_CONTROL */
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT BIT(14)
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT BIT(15)
+
+/* MACSEC_PARAMS2_IG_CP_TAG */
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_VLAN_MTU_CHECK */
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(x) (x)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_NON_VLAN_MTU_CHECK */
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(x) (x)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_PP_CTRL */
+#define MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE BIT(0)
+
+/* MACSEC_INTR_CTRL_STATUS */
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS(x) (x)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M GENMASK(15, 0)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(x) ((x) << 16)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M GENMASK(31, 16)
+#define MACSEC_INTR_CTRL_STATUS_ROLLOVER BIT(5)
+
+#endif
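Editor's note: the flow-type, destination-port and drop-action enums above combine with the MSCC_MS_SAM_FLOW_CTRL field macros into one 32-bit word per secure-association flow. A hedged sketch of composing an egress entry follows; the macsec_writel() accessor and the chosen field values are illustrative, not taken from this patch.

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical accessor for the MACsec register block. */
void macsec_writel(u32 reg, u32 val);

static void macsec_program_egress_flow(unsigned int idx)
{
	u32 flow;

	/* Egress flow through the common port: protect (and encrypt)
	 * matching frames, drop anything that fails processing.
	 */
	flow = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(MSCC_MS_FLOW_EGRESS) |
	       MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(MSCC_MS_PORT_COMMON) |
	       MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
	       MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME |
	       MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;

	macsec_writel(MSCC_MS_SAM_FLOW_CTRL(idx), flow);
}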
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 769a076514b0..a4d2d59fceca 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -387,7 +387,7 @@ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->read_mmd) {
+ if (phydev->drv && phydev->drv->read_mmd) {
val = phydev->drv->read_mmd(phydev, devad, regnum);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
@@ -444,7 +444,7 @@ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->write_mmd) {
+ if (phydev->drv && phydev->drv->write_mmd) {
ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 80be4d691e5b..d76e038cf2cb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -422,8 +422,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->drv && phydev->drv->hwtstamp)
- return phydev->drv->hwtstamp(phydev, ifr);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
/* fall through */
default:
@@ -432,6 +432,31 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+/**
+ * phy_do_ioctl - generic ndo_do_ioctl implementation
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctls
+ * @cmd: ioctl cmd to execute
+ */
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl);
+
+/* same as phy_do_ioctl, but ensures that net_device is running */
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ return phy_do_ioctl(dev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl_running);
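Editor's note: a driver whose ndo_do_ioctl only forwards MII ioctls to the attached PHY can plug these helpers in directly. A sketch; the foo_netdev_ops name is illustrative, not from this patch.

#include <linux/netdevice.h>
#include <linux/phy.h>

static const struct net_device_ops foo_netdev_ops = {
	/* ... other callbacks ... */
	/* Forwards SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to the PHY and
	 * returns -ENODEV while the interface is down.
	 */
	.ndo_do_ioctl	= phy_do_ioctl_running,
};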
+
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b13c52873ef5..6a5056e0ae77 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -881,6 +881,9 @@ EXPORT_SYMBOL(phy_device_register);
*/
void phy_device_remove(struct phy_device *phydev)
{
+ if (phydev->mii_ts)
+ unregister_mii_timestamper(phydev->mii_ts);
+
device_del(&phydev->mdio.dev);
/* Assert the reset signal */
@@ -919,6 +922,8 @@ static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier)
netif_carrier_off(netdev);
}
phydev->adjust_link(netdev);
+ if (phydev->mii_ts && phydev->mii_ts->link_state)
+ phydev->mii_ts->link_state(phydev->mii_ts, phydev);
}
/**
@@ -1102,9 +1107,8 @@ void phy_attached_info(struct phy_device *phydev)
EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)"
-void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+char *phy_attached_info_irq(struct phy_device *phydev)
{
- const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
char *irq_str;
char irq_num[8];
@@ -1121,6 +1125,14 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
break;
}
+ return kasprintf(GFP_KERNEL, "%s", irq_str);
+}
+EXPORT_SYMBOL(phy_attached_info_irq);
+
+void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+ char *irq_str = phy_attached_info_irq(phydev);
if (!fmt) {
phydev_info(phydev, ATTACHED_FMT "\n",
@@ -1137,6 +1149,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
vprintk(fmt, ap);
va_end(ap);
}
+ kfree(irq_str);
}
EXPORT_SYMBOL(phy_attached_print);
@@ -1771,6 +1784,36 @@ int genphy_restart_aneg(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_restart_aneg);
/**
+ * genphy_check_and_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ * @restart: whether aneg restart is requested
+ *
+ * Check and restart auto-negotiation if needed.
+ */
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
+{
+ int ret = 0;
+
+ if (!restart) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE))
+ restart = true;
+ }
+
+ if (restart)
+ ret = genphy_restart_aneg(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL(genphy_check_and_restart_aneg);
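Editor's note: a caller that programs advertisement registers itself can track whether anything actually changed and let this helper decide on the restart. A hedged sketch of a driver config_aneg built on it; foo_config_aneg is illustrative, while phy_modify_changed() and the MII constants come from the existing phylib/mii headers.

#include <linux/mii.h>
#include <linux/phy.h>

static int foo_config_aneg(struct phy_device *phydev)
{
	bool changed = false;
	int ret;

	/* Program the 10/100 advertisement; phy_modify_changed() returns
	 * 1 if the register value was actually modified, 0 if not.
	 */
	ret = phy_modify_changed(phydev, MII_ADVERTISE,
				 ADVERTISE_ALL, ADVERTISE_ALL);
	if (ret < 0)
		return ret;
	if (ret > 0)
		changed = true;

	/* Restart aneg only if the advertisement changed, or if aneg was
	 * found disabled or the PHY isolated.
	 */
	return genphy_check_and_restart_aneg(phydev, changed);
}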
+
+/**
* __genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
* @changed: whether autoneg is requested
@@ -1795,23 +1838,7 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (!changed) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = true; /* do restart aneg */
- }
-
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- return changed ? genphy_restart_aneg(phydev) : 0;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
EXPORT_SYMBOL(__genphy_config_aneg);
@@ -1979,6 +2006,36 @@ int genphy_read_lpa(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_lpa);
/**
+ * genphy_read_status_fixed - read the link parameters for !aneg mode
+ * @phydev: target phy_device struct
+ *
+ * Read the current duplex and speed state for a PHY operating with
+ * autonegotiation disabled.
+ */
+int genphy_read_status_fixed(struct phy_device *phydev)
+{
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_status_fixed);
+
+/**
* genphy_read_status - check the link status and update current link state
* @phydev: target phy_device struct
*
@@ -2012,22 +2069,9 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
phy_resolve_aneg_linkmode(phydev);
} else if (phydev->autoneg == AUTONEG_DISABLE) {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
}
return 0;
@@ -2575,7 +2619,6 @@ static struct phy_driver genphy_driver = {
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
.get_features = genphy_read_abilities,
- .aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ee7a718662c6..70b9a143db84 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -48,7 +48,8 @@ struct phylink {
unsigned long phylink_disable_state; /* bitmask of disables */
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
- u8 link_an_mode; /* MLO_AN_xxx */
+ u8 cfg_link_an_mode; /* MLO_AN_xxx */
+ u8 cur_link_an_mode;
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
@@ -71,6 +72,9 @@ struct phylink {
bool mac_link_dropped;
struct sfp_bus *sfp_bus;
+ bool sfp_may_have_phy;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ u8 sfp_port;
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -182,8 +186,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->link_config.pause |= MLO_PAUSE_ASYM;
if (ret == 0) {
- desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
- 0, GPIOD_IN, "?");
+ desc = fwnode_gpiod_get_index(fixed_node, "link", 0,
+ GPIOD_IN, "?");
if (!IS_ERR(desc))
pl->link_gpio = desc;
@@ -256,12 +260,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
- pl->link_an_mode = MLO_AN_FIXED;
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
return -EINVAL;
@@ -273,10 +277,11 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
pl->link_config.an_enabled = true;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -293,7 +298,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 2500baseX_Full);
break;
+ case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -301,6 +308,10 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 2500baseT_Full);
+ phylink_set(pl->supported, 2500baseX_Full);
+ phylink_set(pl->supported, 5000baseT_Full);
+ phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
@@ -333,14 +344,14 @@ static void phylink_mac_config(struct phylink *pl,
{
phylink_dbg(pl,
"%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
- __func__, phylink_an_mode_str(pl->link_an_mode),
+ __func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
- pl->ops->mac_config(pl->config, pl->link_an_mode, state);
+ pl->ops->mac_config(pl->config, pl->cur_link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -441,7 +452,7 @@ static void phylink_mac_link_up(struct phylink *pl,
struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(pl->config, pl->link_an_mode,
+ pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
pl->cur_interface, pl->phydev);
if (ndev)
@@ -460,7 +471,7 @@ static void phylink_mac_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->ops->mac_link_down(pl->config, pl->link_an_mode,
+ pl->ops->mac_link_down(pl->config, pl->cur_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -479,7 +490,7 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->mac_link_dropped) {
link_state.link = false;
} else {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
phylink_resolve_flow(pl, &link_state);
@@ -650,7 +661,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(ret);
}
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
ret = phylink_parse_fixedlink(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -658,6 +669,8 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -713,10 +726,12 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
phy_duplex_to_str(phydev->duplex));
}
-static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
+static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ char *irq_str;
int ret;
/*
@@ -731,7 +746,19 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
memset(&config, 0, sizeof(config));
linkmode_copy(supported, phy->supported);
linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
+
+ /* Clause 45 PHYs switch their Serdes lane between several different
+ * modes, normally 10GBASE-R, SGMII. Some use 2500BASE-X for 2.5G
+ * speeds. We really need to know which interface modes the PHY and
+ * MAC support to properly work out which linkmodes can be supported.
+ */
+ if (phy->is_c45 &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_XAUI &&
+ interface != PHY_INTERFACE_MODE_USXGMII)
+ config.interface = PHY_INTERFACE_MODE_NA;
+ else
+ config.interface = interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
@@ -740,13 +767,16 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
+ irq_str = phy_attached_info_irq(phy);
phylink_info(pl,
- "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
- phy->drv->name);
+ "PHY [%s] driver [%s] (irq=%s)\n",
+ dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
+ kfree(irq_str);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
+ pl->phy_state.interface = interface;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -766,28 +796,18 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
-static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
- phy_interface_t interface)
+static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(interface))))
return -EINVAL;
if (pl->phydev)
return -EBUSY;
- ret = phy_attach_direct(pl->netdev, phy, 0, interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return phy_attach_direct(pl->netdev, phy, 0, interface);
}
/**
@@ -807,13 +827,23 @@ static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
+ int ret;
+
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- return __phylink_connect_phy(pl, phy, pl->link_interface);
+ ret = phylink_attach_phy(pl, phy, pl->link_interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, pl->link_config.interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -837,8 +867,8 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
int ret;
/* Fixed links and 802.3z are handled without needing a PHY */
- if (pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
@@ -849,20 +879,23 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_node = of_parse_phandle(dn, "phy-device", 0);
if (!phy_node) {
- if (pl->link_an_mode == MLO_AN_PHY)
+ if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
- pl->link_interface);
+ phy_dev = of_phy_find_device(phy_node);
/* We're done with the phy_node handle */
of_node_put(phy_node);
-
if (!phy_dev)
return -ENODEV;
- ret = phylink_bringup_phy(pl, phy_dev);
+ ret = phy_attach_direct(pl->netdev, phy_dev, flags,
+ pl->link_interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
phy_detach(phy_dev);
@@ -912,7 +945,7 @@ int phylink_fixed_state_cb(struct phylink *pl,
/* It does not make sense to let the link be overridden unless we use
* MLO_AN_FIXED
*/
- if (pl->link_an_mode != MLO_AN_FIXED)
+ if (pl->cfg_link_an_mode != MLO_AN_FIXED)
return -EINVAL;
mutex_lock(&pl->state_mutex);
@@ -962,7 +995,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->link_an_mode),
+ phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -985,7 +1018,7 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
- if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
int irq = gpiod_to_irq(pl->link_gpio);
if (irq > 0) {
@@ -1000,7 +1033,8 @@ void phylink_start(struct phylink *pl)
if (irq <= 0)
mod_timer(&pl->link_poll, jiffies + HZ);
}
- if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
+ if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
+ pl->config->pcs_poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
phy_start(pl->phydev);
@@ -1127,7 +1161,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -1199,7 +1233,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link (as specified by firmware), refuse
* to change link parameters.
*/
- if (pl->link_an_mode == MLO_AN_FIXED &&
+ if (pl->cur_link_an_mode == MLO_AN_FIXED &&
(s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex))
return -EINVAL;
@@ -1211,7 +1245,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
} else {
/* If we have a fixed link, refuse to enable autonegotiation */
- if (pl->link_an_mode == MLO_AN_FIXED)
+ if (pl->cur_link_an_mode == MLO_AN_FIXED)
return -EINVAL;
config.speed = SPEED_UNKNOWN;
@@ -1221,44 +1255,66 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
- our_kset = *kset;
- linkmode_copy(our_kset.link_modes.advertising, config.advertising);
- our_kset.base.speed = config.speed;
- our_kset.base.duplex = config.duplex;
-
- /* If we have a PHY, configure the phy */
if (pl->phydev) {
+ /* If we have a PHY, we process the kset change via phylib.
+ * phylib will call our link state function if the PHY
+ * parameters have changed, which will trigger a resolve
+ * and update the MAC configuration.
+ */
+ our_kset = *kset;
+ linkmode_copy(our_kset.link_modes.advertising,
+ config.advertising);
+ our_kset.base.speed = config.speed;
+ our_kset.base.duplex = config.duplex;
+
ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset);
if (ret)
return ret;
- }
- mutex_lock(&pl->state_mutex);
- /* Configure the MAC to match the new settings */
- linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
- pl->link_config.interface = config.interface;
- pl->link_config.speed = our_kset.base.speed;
- pl->link_config.duplex = our_kset.base.duplex;
- pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
+ mutex_lock(&pl->state_mutex);
+ /* Save the new configuration */
+ linkmode_copy(pl->link_config.advertising,
+ our_kset.link_modes.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = our_kset.base.speed;
+ pl->link_config.duplex = our_kset.base.duplex;
+ pl->link_config.an_enabled = our_kset.base.autoneg !=
+ AUTONEG_DISABLE;
+ mutex_unlock(&pl->state_mutex);
+ } else {
+ /* For a fixed link, this isn't able to change any parameters,
+ * which just leaves inband mode.
+ */
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
- /* If we have a PHY, phylib will call our link state function if the
- * mode has changed, which will trigger a resolve and update the MAC
- * configuration. For a fixed link, this isn't able to change any
- * parameters, which just leaves inband mode.
- */
- if (pl->link_an_mode == MLO_AN_INBAND &&
- !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_an_restart(pl);
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled &&
+ phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = config.speed;
+ pl->link_config.duplex = config.duplex;
+ pl->link_config.an_enabled = kset->base.autoneg !=
+ AUTONEG_DISABLE;
+
+ if (pl->cur_link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
+ /* If in 802.3z mode, this updates the advertisement.
+ *
+ * If we are in SGMII mode without a PHY, there is no
+ * advertisement; the only thing we have is the pause
+ * modes which can only come from a PHY.
+ */
+ phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_an_restart(pl);
+ }
+ mutex_unlock(&pl->state_mutex);
}
- mutex_unlock(&pl->state_mutex);
return 0;
}
@@ -1343,7 +1399,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
pause->tx_pause);
} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state)) {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
@@ -1550,7 +1606,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -1575,7 +1631,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -1681,25 +1737,21 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_module_insert(void *upstream,
- const struct sfp_eeprom_id *id)
+static int phylink_sfp_config(struct phylink *pl, u8 mode,
+ const unsigned long *supported,
+ const unsigned long *advertising)
{
- struct phylink *pl = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- int ret = 0;
bool changed;
- u8 port;
-
- ASSERT_RTNL();
+ int ret;
- sfp_parse_support(pl->sfp_bus, id, support);
- port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_copy(support, supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, support);
+ linkmode_copy(config.advertising, advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -1714,9 +1766,7 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
- linkmode_copy(support1, support);
-
- iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
+ iface = sfp_select_interface(pl->sfp_bus, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
phylink_err(pl,
"selection of interface failed, advertisement %*pb\n",
@@ -1725,18 +1775,18 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
+ linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
+ phylink_an_mode_str(mode), phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
@@ -1748,19 +1798,19 @@ static int phylink_sfp_module_insert(void *upstream,
linkmode_copy(pl->link_config.advertising, config.advertising);
}
- if (pl->link_an_mode != MLO_AN_INBAND ||
+ if (pl->cur_link_an_mode != mode ||
pl->link_config.interface != config.interface) {
pl->link_config.interface = config.interface;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cur_link_an_mode = mode;
changed = true;
phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface));
}
- pl->link_port = port;
+ pl->link_port = pl->sfp_port;
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state))
@@ -1769,6 +1819,55 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
+static int phylink_sfp_module_insert(void *upstream,
+ const struct sfp_eeprom_id *id)
+{
+ struct phylink *pl = upstream;
+ unsigned long *support = pl->sfp_support;
+
+ ASSERT_RTNL();
+
+ linkmode_zero(support);
+ sfp_parse_support(pl->sfp_bus, id, support);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+
+ /* If this module may have a PHY connecting later, defer until later */
+ pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
+ if (pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+}
+
+static int phylink_sfp_module_start(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, start the PHY now. */
+ if (pl->phydev) {
+ phy_start(pl->phydev);
+ return 0;
+ }
+
+ /* If the module may have a PHY but we didn't detect one, we
+ * need to configure the MAC here.
+ */
+ if (!pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND,
+ pl->sfp_support, pl->sfp_support);
+}
+
+static void phylink_sfp_module_stop(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, stop it. */
+ if (pl->phydev)
+ phy_stop(pl->phydev);
+}
+
static void phylink_sfp_link_down(void *upstream)
{
struct phylink *pl = upstream;
@@ -1788,11 +1887,51 @@ static void phylink_sfp_link_up(void *upstream)
phylink_run_resolve(pl);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static bool phylink_phy_no_inband(struct phy_device *phy)
+{
+ return phy->is_c45 &&
+ (phy->c45_ids.device_ids[1] & 0xfffffff0) == 0xae025150;
+}
+
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
+ phy_interface_t interface;
+ u8 mode;
+ int ret;
- return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
+ /*
+ * This is the new way of dealing with flow control for PHYs,
+ * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
+ * phy drivers should not set SUPPORTED_[Asym_]Pause"), except that,
+ * via our validate call to the MAC, we rely upon the MAC clearing
+ * the bits from both the supported and advertising fields.
+ */
+ phy_support_asym_pause(phy);
+
+ if (phylink_phy_no_inband(phy))
+ mode = MLO_AN_PHY;
+ else
+ mode = MLO_AN_INBAND;
+
+ /* Do the initial configuration */
+ ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ if (ret < 0)
+ return ret;
+
+ interface = pl->link_config.interface;
+ ret = phylink_attach_phy(pl, phy, interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
static void phylink_sfp_disconnect_phy(void *upstream)
@@ -1804,6 +1943,8 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.attach = phylink_sfp_attach,
.detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
+ .module_start = phylink_sfp_module_start,
+ .module_stop = phylink_sfp_module_stop,
.link_up = phylink_sfp_link_up,
.link_down = phylink_sfp_link_down,
.connect_phy = phylink_sfp_connect_phy,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 476db5345e1a..f5fa2fff3ddc 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -29,6 +29,8 @@
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
+#define RTL8211F_RX_DELAY BIT(3)
+
#define RTL8211E_TX_DELAY BIT(1)
#define RTL8211E_RX_DELAY BIT(2)
#define RTL8211E_MODE_MII_GMII BIT(3)
@@ -171,25 +173,66 @@ static int rtl8211c_config_init(struct phy_device *phydev)
static int rtl8211f_config_init(struct phy_device *phydev)
{
- u16 val;
+ struct device *dev = &phydev->mdio.dev;
+ u16 val_txdly, val_rxdly;
+ int ret;
- /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
- * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
- */
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
+ val_txdly = 0;
+ val_rxdly = 0;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_RXID:
- val = 0;
+ val_txdly = 0;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
- case PHY_INTERFACE_MODE_RGMII_ID:
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = RTL8211F_TX_DELAY;
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = 0;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
+
default: /* the rest of the modes imply leaving delay as is. */
return 0;
}
- return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ val_txdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the TX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
+ val_txdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
+ val_txdly ? "enabled" : "disabled");
+ }
+
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ val_rxdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the RX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
+ val_rxdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
+ val_rxdly ? "enabled" : "disabled");
+ }
+
+ return 0;
}
static int rtl8211e_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 5a72093ab6e7..d949ea7b4f8c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -103,6 +103,7 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
+
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -124,35 +125,35 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
/* port is the physical connector, set this from the connector field. */
switch (id->base.connector) {
- case SFP_CONNECTOR_SC:
- case SFP_CONNECTOR_FIBERJACK:
- case SFP_CONNECTOR_LC:
- case SFP_CONNECTOR_MT_RJ:
- case SFP_CONNECTOR_MU:
- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_SC:
+ case SFF8024_CONNECTOR_FIBERJACK:
+ case SFF8024_CONNECTOR_LC:
+ case SFF8024_CONNECTOR_MT_RJ:
+ case SFF8024_CONNECTOR_MU:
+ case SFF8024_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_MPO_1X12:
+ case SFF8024_CONNECTOR_MPO_2X16:
port = PORT_FIBRE;
break;
- case SFP_CONNECTOR_RJ45:
+ case SFF8024_CONNECTOR_RJ45:
port = PORT_TP;
break;
- case SFP_CONNECTOR_COPPER_PIGTAIL:
+ case SFF8024_CONNECTOR_COPPER_PIGTAIL:
port = PORT_DA;
break;
- case SFP_CONNECTOR_UNSPEC:
+ case SFF8024_CONNECTOR_UNSPEC:
if (id->base.e1000_base_t) {
port = PORT_TP;
break;
}
/* fallthrough */
- case SFP_CONNECTOR_SG: /* guess */
- case SFP_CONNECTOR_MPO_1X12:
- case SFP_CONNECTOR_MPO_2X16:
- case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_NOSEPARATE:
- case SFP_CONNECTOR_MXC_2X16:
+ case SFF8024_CONNECTOR_SG: /* guess */
+ case SFF8024_CONNECTOR_HSSDC_II:
+ case SFF8024_CONNECTOR_NOSEPARATE:
+ case SFF8024_CONNECTOR_MXC_2X16:
port = PORT_OTHER;
break;
default:
@@ -179,6 +180,33 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
EXPORT_SYMBOL_GPL(sfp_parse_port);
/**
+ * sfp_may_have_phy() - indicate whether the module may have a PHY
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Parse the EEPROM identification given in @id, and return whether
+ * this module may have a PHY.
+ */
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+{
+ if (id->base.e1000_base_t)
+ return true;
+
+ if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
+ switch (id->base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sfp_may_have_phy);
+
+/**
* sfp_parse_support() - Parse the eeprom id for supported link modes
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
@@ -261,22 +289,33 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
switch (id->base.extended_cc) {
- case 0x00: /* Unspecified */
+ case SFF8024_ECC_UNSPEC:
break;
- case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */
+ case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
break;
- case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */
- case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */
+ case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
+ case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
phylink_set(modes, 100000baseLR4_ER4_Full);
break;
- case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */
- case 0x0c: /* 25Gbase-CR CA-S */
- case 0x0d: /* 25Gbase-CR CA-N */
+ case SFF8024_ECC_100GBASE_CR4:
phylink_set(modes, 100000baseCR4_Full);
+ /* fallthrough */
+ case SFF8024_ECC_25GBASE_CR_S:
+ case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
break;
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ phylink_set(modes, 10000baseT_Full);
+ break;
+ case SFF8024_ECC_5GBASE_T:
+ phylink_set(modes, 5000baseT_Full);
+ break;
+ case SFF8024_ECC_2_5GBASE_T:
+ phylink_set(modes, 2500baseT_Full);
+ break;
default:
dev_warn(bus->sfp_dev,
"Unknown/unsupported extended compliance code: 0x%02x\n",
@@ -301,7 +340,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
/* If the encoding and bit rate allows 1000baseX */
- if (id->base.encoding == SFP_ENCODING_8B10B && br_nom &&
+ if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom &&
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
}
@@ -320,31 +359,27 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
* @link_modes: ethtool link modes mask
*
- * Derive the phy_interface_t mode for the information found in the
- * module's identifying EEPROM and the link modes mask. There is no
- * standard or defined way to derive this information, so we decide
- * based upon the link mode mask.
+ * Derive the phy_interface_t mode for the SFP module from the link
+ * modes mask.
*/
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
if (phylink_test(link_modes, 10000baseCR_Full) ||
phylink_test(link_modes, 10000baseSR_Full) ||
phylink_test(link_modes, 10000baseLR_Full) ||
phylink_test(link_modes, 10000baseLRM_Full) ||
- phylink_test(link_modes, 10000baseER_Full))
- return PHY_INTERFACE_MODE_10GKR;
+ phylink_test(link_modes, 10000baseER_Full) ||
+ phylink_test(link_modes, 10000baseT_Full))
+ return PHY_INTERFACE_MODE_10GBASER;
if (phylink_test(link_modes, 2500baseX_Full))
return PHY_INTERFACE_MODE_2500BASEX;
- if (id->base.e1000_base_t ||
- id->base.e100_base_lx ||
- id->base.e100_base_fx)
+ if (phylink_test(link_modes, 1000baseT_Half) ||
+ phylink_test(link_modes, 1000baseT_Full))
return PHY_INTERFACE_MODE_SGMII;
if (phylink_test(link_modes, 1000baseX_Full))
@@ -705,6 +740,27 @@ void sfp_module_remove(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
+int sfp_module_start(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+ int ret = 0;
+
+ if (ops && ops->module_start)
+ ret = ops->module_start(bus->upstream);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sfp_module_start);
+
+void sfp_module_stop(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+
+ if (ops && ops->module_stop)
+ ops->module_stop(bus->upstream);
+}
+EXPORT_SYMBOL_GPL(sfp_module_stop);
+
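
sfp_module_start() and sfp_module_stop() give the upstream (typically phylink) a notification once the module, including any on-module PHY, has finished initialising, and again before it is torn down. A hedged sketch of an upstream consumer wiring up the new hooks; the my_* names are hypothetical and the other mandatory ops are elided:

#include <linux/sfp.h>

static int my_module_start(void *upstream)
{
	/* module and PHY (if any) are ready; safe to bring the link up */
	return 0;
}

static void my_module_stop(void *upstream)
{
	/* undo anything begun in my_module_start() */
}

static const struct sfp_upstream_ops my_upstream_ops = {
	.module_start	= my_module_start,
	.module_stop	= my_module_stop,
	/* .attach, .detach, etc. as before */
};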
static void sfp_socket_clear(struct sfp_bus *bus)
{
bus->sfp_dev = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c0b9a8e4e65a..73c2969f11a4 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -59,8 +59,10 @@ enum {
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_FAIL,
SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_PHY,
SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
@@ -122,8 +124,10 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_FAIL] = "fail",
[SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_PHY] = "init_phy",
[SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
@@ -155,10 +159,34 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
-#define T_WAIT msecs_to_jiffies(50)
-#define T_INIT_JIFFIES msecs_to_jiffies(300)
-#define T_RESET_US 10
-#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+/* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a
+ * non-cooled module to initialise its laser safety circuitry. We wait
+ * an initial T_WAIT period before we check the tx fault to give any PHY
+ * on board (for a copper SFP) time to initialise.
+ */
+#define T_WAIT msecs_to_jiffies(50)
+#define T_START_UP msecs_to_jiffies(300)
+#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
+
+/* t_reset is the time required to assert the TX_DISABLE signal to reset
+ * an indicated TX_FAULT.
+ */
+#define T_RESET_US 10
+#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+
+/* N_FAULT_INIT is the number of recovery attempts at module initialisation
+ * time. If the TX_FAULT signal is not deasserted after this number of
+ * attempts at clearing it, we decide that the module is faulty.
+ * N_FAULT is the same but after the module has initialised.
+ */
+#define N_FAULT_INIT 5
+#define N_FAULT 5
+
+/* T_PHY_RETRY is the time interval between attempts to probe the PHY.
+ * R_PHY_RETRY is the number of attempts.
+ */
+#define T_PHY_RETRY msecs_to_jiffies(50)
+#define R_PHY_RETRY 12
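
For scale: with these defaults the state machine polls a slow-starting copper PHY for up to R_PHY_RETRY * T_PHY_RETRY = 12 * 50ms = 600ms before concluding that no PHY is present, and attempts N_FAULT_INIT = 5 TX_DISABLE-driven resets of a stuck TX_FAULT before declaring the module faulty.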
/* SFP module presence detection is poor: the three MOD DEF signals are
* the same length on the PCB, which means it's possible for MOD DEF 0 to
@@ -214,10 +242,12 @@ struct sfp {
unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
- unsigned int sm_retries;
+ unsigned char sm_fault_retries;
+ unsigned char sm_phy_retries;
struct sfp_eeprom_id id;
unsigned int module_power_mW;
+ unsigned int module_t_start_up;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -231,7 +261,7 @@ struct sfp {
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFF &&
+ return id->base.phys_id == SFF8024_ID_SFF_8472 &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -242,7 +272,7 @@ static const struct sff_data sff_data = {
static bool sfp_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFP &&
+ return id->base.phys_id == SFF8024_ID_SFP &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -412,13 +442,20 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
unsigned int state = 0;
u8 status;
+ int ret;
- if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
- sizeof(status)) {
+ ret = sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status));
+ if (ret == sizeof(status)) {
if (status & SFP_STATUS_RX_LOS)
state |= SFP_F_LOS;
if (status & SFP_STATUS_TX_FAULT)
state |= SFP_F_TX_FAULT;
+ } else {
+ dev_err_ratelimited(sfp->dev,
+ "failed to read SFP soft status: %d\n",
+ ret);
+ /* Preserve the current state */
+ state = sfp->state;
}
return state & sfp->state_soft_mask;
@@ -1383,26 +1420,30 @@ static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
static void sfp_sm_phy_detach(struct sfp *sfp)
{
- phy_stop(sfp->mod_phy);
sfp_remove_phy(sfp->sfp_bus);
phy_device_remove(sfp->mod_phy);
phy_device_free(sfp->mod_phy);
sfp->mod_phy = NULL;
}
-static void sfp_sm_probe_phy(struct sfp *sfp)
+static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
- if (phy == ERR_PTR(-ENODEV)) {
- dev_info(sfp->dev, "no PHY detected\n");
- return;
- }
+ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ if (phy == ERR_PTR(-ENODEV))
+ return PTR_ERR(phy);
if (IS_ERR(phy)) {
dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
- return;
+ return PTR_ERR(phy);
+ }
+
+ err = phy_device_register(phy);
+ if (err) {
+ phy_device_free(phy);
+ dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ return err;
}
err = sfp_add_phy(sfp->sfp_bus, phy);
@@ -1410,11 +1451,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
phy_device_remove(phy);
phy_device_free(phy);
dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
- return;
+ return err;
}
sfp->mod_phy = phy;
- phy_start(phy);
+
+ return 0;
}
static void sfp_sm_link_up(struct sfp *sfp)
@@ -1464,7 +1506,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
- if (sfp->sm_retries && !--sfp->sm_retries) {
+ if (sfp->sm_fault_retries && !--sfp->sm_fault_retries) {
dev_err(sfp->dev,
"module persistently indicates fault, disabling\n");
sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0);
@@ -1476,21 +1518,35 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
-static void sfp_sm_probe_for_phy(struct sfp *sfp)
+/* Probe an SFP for a PHY device if the module supports copper - the PHY
+ * normally sits at I2C bus address 0x56, and may be either a clause 22
+ * or clause 45 PHY.
+ *
+ * Clause 22 copper SFP modules normally operate in Cisco SGMII mode with
+ * negotiation enabled, but some may be in 1000base-X - which is for the
+ * PHY driver to determine.
+ *
+ * Clause 45 copper SFP+ modules (10G) appear to switch their interface
+ * mode according to the negotiated line speed.
+ */
+static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
- /* Setting the serdes link mode is guesswork: there's no
- * field in the EEPROM which indicates what mode should
- * be used.
- *
- * If it's a gigabit-only fiber module, it probably does
- * not have a PHY, so switch to 802.3z negotiation mode.
- * Otherwise, switch to SGMII mode (which is required to
- * support non-gigabit speeds) and probe for a PHY.
- */
- if (sfp->id.base.e1000_base_t ||
- sfp->id.base.e100_base_lx ||
- sfp->id.base.e100_base_fx)
- sfp_sm_probe_phy(sfp);
+ int err = 0;
+
+ switch (sfp->id.base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ err = sfp_sm_probe_phy(sfp, true);
+ break;
+
+ default:
+ if (sfp->id.base.e1000_base_t)
+ err = sfp_sm_probe_phy(sfp, false);
+ break;
+ }
+ return err;
}
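
The switch above selects clause 45 probing for the 10GBASE-T/5GBASE-T/2.5GBASE-T compliance codes and clause 22 probing for 1000BASE-T modules. A condensed sketch of the probe-and-register sequence that sfp_sm_probe_phy() performs, with error handling shortened:

#include <linux/phy.h>

static struct phy_device *probe_one(struct mii_bus *bus, int addr, bool is_c45)
{
	struct phy_device *phy;
	int err;

	phy = get_phy_device(bus, addr, is_c45); /* ERR_PTR(-ENODEV) if absent */
	if (IS_ERR(phy))
		return phy;

	err = phy_device_register(phy);		/* publish it on the bus */
	if (err) {
		phy_device_free(phy);
		return ERR_PTR(err);
	}
	return phy;
}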
static int sfp_module_parse_power(struct sfp *sfp)
@@ -1550,6 +1606,13 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
return -EAGAIN;
}
+ /* DM7052 reports as a high power module, responds to reads (with
+ * all bytes 0xff) at 0x51 but does not accept writes. In any case,
+ * if the bit is already set, we're already in high power mode.
+ */
+ if (!!(val & BIT(0)) == enable)
+ return 0;
+
if (enable)
val |= BIT(0);
else
@@ -1655,6 +1718,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
+ if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
+ !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ else
+ sfp->module_t_start_up = T_START_UP;
+
return 0;
}
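
The module_t_start_up override above hard-codes a single known-slow GPON ONT. A hedged sketch of how such quirks could generalise into a table; the structure is illustrative only, and the real match uses memcmp() over the full 16-byte space-padded EEPROM fields (padding elided here):

struct sfp_vendor_quirk {
	const char *vendor;		/* vs. id.base.vendor_name */
	const char *part;		/* vs. id.base.vendor_pn */
	unsigned int t_start_up_ms;
};

static const struct sfp_vendor_quirk sfp_quirks[] = {
	{ "ALCATELLUCENT", "3FE46541AA", 60000 },	/* needs ~60s to start */
};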
@@ -1812,6 +1881,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
static void sfp_sm_main(struct sfp *sfp, unsigned int event)
{
unsigned long timeout;
+ int ret;
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
@@ -1820,6 +1890,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
if (sfp->sm_state == SFP_S_LINK_UP &&
sfp->sm_dev_state == SFP_DEV_UP)
sfp_sm_link_down(sfp);
+ if (sfp->sm_state > SFP_S_INIT)
+ sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
sfp_module_tx_disable(sfp);
@@ -1841,7 +1913,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_tx_enable(sfp);
/* Initialise the fault clearance retries */
- sfp->sm_retries = 5;
+ sfp->sm_fault_retries = N_FAULT_INIT;
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
@@ -1855,11 +1927,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
break;
if (sfp->state & SFP_F_TX_FAULT) {
- /* Wait t_init before indicating that the link is up,
- * provided the current state indicates no TX_FAULT. If
- * TX_FAULT clears before this time, that's fine too.
+ /* Wait up to t_init (SFF-8472) or t_start_up (SFF-8431)
+ * from the TX_DISABLE deassertion for the module to
+ * initialise, which is indicated by TX_FAULT
+ * deasserting.
*/
- timeout = T_INIT_JIFFIES;
+ timeout = sfp->module_t_start_up;
if (timeout > T_WAIT)
timeout -= T_WAIT;
else
@@ -1876,27 +1949,51 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_INIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- /* TX_FAULT is still asserted after t_init, so assume
- * there is a fault.
+ /* TX_FAULT is still asserted after t_init or
+ * t_start_up, so assume there is a fault.
*/
sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
- sfp->sm_retries == 5);
+ sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
- init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
- * clear. Probe for the PHY and check the LOS state.
- */
- sfp_sm_probe_for_phy(sfp);
- sfp_sm_link_check_los(sfp);
+ init_done:
+ sfp->sm_phy_retries = R_PHY_RETRY;
+ goto phy_probe;
+ }
+ break;
- /* Reset the fault retry count */
- sfp->sm_retries = 5;
+ case SFP_S_INIT_PHY:
+ if (event != SFP_E_TIMEOUT)
+ break;
+ phy_probe:
+ /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ ret = sfp_sm_probe_for_phy(sfp);
+ if (ret == -ENODEV) {
+ if (--sfp->sm_phy_retries) {
+ sfp_sm_next(sfp, SFP_S_INIT_PHY, T_PHY_RETRY);
+ break;
+ } else {
+ dev_info(sfp->dev, "no PHY detected\n");
+ }
+ } else if (ret) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
}
+ if (sfp_module_start(sfp->sfp_bus)) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_fault_retries = N_FAULT;
break;
case SFP_S_INIT_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_INIT, sfp->module_t_start_up);
}
break;
@@ -1920,7 +2017,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_REINIT, sfp->module_t_start_up);
}
break;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 64f54b0bbd8c..b83f70526270 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -22,6 +22,8 @@ void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_module_remove(struct sfp_bus *bus);
+int sfp_module_start(struct sfp_bus *bus);
+void sfp_module_stop(struct sfp_bus *bus);
int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops);
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index a32b3fd8a370..38834347a427 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -68,12 +68,7 @@ static int upd60620_read_status(struct phy_device *phydev)
mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
phy_state);
- if (phydev->duplex == DUPLEX_FULL) {
- if (phy_state & LPA_PAUSE_CAP)
- phydev->pause = 1;
- if (phy_state & LPA_PAUSE_ASYM)
- phydev->asym_pause = 1;
- }
+ phy_resolve_aneg_pause(phydev);
}
}
return 0;
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index a7b9cf3269bf..29a0917a81e6 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -874,15 +874,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
- ap->rpkt = skb;
- }
- if (skb->len == 0) {
- /* Try to get the payload 4-byte aligned.
- * This should match the
- * PPP_ALLSTATIONS/PPP_UI/compressed tests in
- * process_input_packet, but we do not have
- * enough chars here to test buf[1] and buf[2].
- */
+ ap->rpkt = skb;
+ }
+ if (skb->len == 0) {
+ /* Try to get the payload 4-byte aligned.
+ * This should match the
+ * PPP_ALLSTATIONS/PPP_UI/compressed tests in
+ * process_input_packet, but we do not have
+ * enough chars here to test buf[1] and buf[2].
+ */
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
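
The re-indented comment and reserve above are alignment arithmetic: with address/control compressed, the frame begins with the protocol field, which occupies two bytes when the first byte is even and one byte when it is odd (protocol also compressed). Reserving 2 + (buf[0] & 1), i.e. 2 or 3 bytes, lands the payload at offset 4 in either case; an uncompressed frame (0xff 0x03 plus two protocol bytes) is already 4-byte aligned, so nothing is reserved.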
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3bf8a8b42983..22cc2cb9d878 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -296,8 +296,6 @@ static struct class *ppp_class;
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
- BUG_ON(!net);
-
return net_generic(net, ppp_net_id);
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e1fabb3e3246..acccb747aeda 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -155,7 +155,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
- RT_TOS(0), 0);
+ RT_TOS(0), sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto tx_error;
@@ -444,7 +444,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
- IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+ IPPROTO_GRE, RT_CONN_FLAGS(sk),
+ sk->sk_bound_dev_if);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 2a91c192659f..6f4d7ba8b109 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -452,12 +452,19 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
-static void sl_tx_timeout(struct net_device *dev)
+static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slip *sl = netdev_priv(dev);
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
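
The slip changes pair rcu_dereference() in the wakeup path with rcu_assign_pointer() plus synchronize_rcu() on close, fixing a use-after-free against a detaching tty. Reduced to its two sides (a sketch of the pattern, not additional driver code):

/* reader: slip_write_wakeup() */
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (sl)
	schedule_work(&sl->tx_work);
rcu_read_unlock();

/* writer: slip_close() - unpublish, wait out readers, then tear down */
rcu_assign_pointer(tty->disc_data, NULL);
synchronize_rcu();
flush_work(&sl->tx_work);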
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a6d63665ad03..1f4bdd94407a 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -341,6 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
features |= tap->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+ struct sk_buff *next;
if (IS_ERR(segs))
goto drop;
@@ -352,16 +353,13 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
}
consume_skb(skb);
- while (segs) {
- struct sk_buff *nskb = segs->next;
-
- segs->next = NULL;
- if (ptr_ring_produce(&q->ring, segs)) {
- kfree_skb(segs);
- kfree_skb_list(nskb);
+ skb_list_walk_safe(segs, skb, next) {
+ skb_mark_not_on_list(skb);
+ if (ptr_ring_produce(&q->ring, skb)) {
+ kfree_skb(skb);
+ kfree_skb_list(next);
break;
}
- segs = nskb;
}
} else {
/* If we receive a partial checksum and the tap side
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 683d371e6e82..650c937ed56b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1718,7 +1718,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err < 0)
goto err_xdp;
if (err == XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (err != XDP_PASS)
goto out;
@@ -1936,6 +1936,10 @@ drop:
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
return total_len;
}
}
@@ -2549,7 +2553,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
}
if (flush)
- xdp_do_flush_map();
+ xdp_do_flush();
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index af3994e0853b..4e514f5d7c6c 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -39,17 +39,6 @@ static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
return 0;
}
-static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- if (!netif_running(net))
- return -EINVAL;
-
- if (!net->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(net->phydev, rq, cmd);
-}
-
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
@@ -134,7 +123,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ax88172a_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 1e58702c737f..d387bc7ac1b6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -447,7 +447,7 @@ static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void catc_tx_timeout(struct net_device *netdev)
+static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct catc *catc = netdev_priv(netdev);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 9df3c1ffff35..d7f3b70d5477 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -111,8 +111,8 @@ static int control_read(struct usbnet *dev,
request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
@@ -130,8 +130,6 @@ static int control_read(struct usbnet *dev,
err = -EINVAL;
kfree(buf);
- return err;
-
err_out:
return err;
}
@@ -151,8 +149,8 @@ static int control_write(struct usbnet *dev, unsigned char request,
request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
if (data) {
buf = kmemdup(data, size, GFP_KERNEL);
@@ -181,8 +179,8 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return -ENODEV;
@@ -199,8 +197,8 @@ static void ch9200_mdio_write(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s() phy_id=%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return;
@@ -219,8 +217,8 @@ static int ch9200_link_reset(struct usbnet *dev)
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "%s() speed:%d duplex:%d\n",
+ __func__, ecmd.speed, ecmd.duplex);
return 0;
}
@@ -309,7 +307,7 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
unsigned char mac_addr[0x06];
int rd_mac_len = 0;
- netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
+ netdev_dbg(dev->net, "%s:\n\tusbnet VID:%0x PID:%0x\n", __func__,
le16_to_cpu(dev->udev->descriptor.idVendor),
le16_to_cpu(dev->udev->descriptor.idProduct));
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index ca827802f291..417e42c9fd03 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -820,7 +820,7 @@ static const struct ethtool_ops ops = {
};
/* called when a packet did not ack after watchdogtimeout */
-static void hso_net_tx_timeout(struct net_device *net)
+static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct hso_net *odev = netdev_priv(net);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8c01fbf68a89..c792d65dd7b4 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -400,7 +400,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static void ipheth_tx_timeout(struct net_device *net)
+static void ipheth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct ipheth_device *dev = netdev_priv(net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 8e210ba4a313..ed01dc964c99 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -894,7 +894,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
/****************************************************************
* kaweth_tx_timeout
****************************************************************/
-static void kaweth_tx_timeout(struct net_device *net)
+static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct kaweth_device *kaweth = netdev_priv(net);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index fb4781080d6d..eccbf4cd7149 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@@ -1663,14 +1664,6 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
-}
-
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
@@ -3660,7 +3653,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
usb_put_dev(udev);
}
-static void lan78xx_tx_timeout(struct net_device *net)
+static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -3668,6 +3661,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@@ -3676,11 +3682,12 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_change_mtu = lan78xx_change_mtu,
.ndo_set_mac_address = lan78xx_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = lan78xx_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = lan78xx_set_multicast,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+ .ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(struct timer_list *t)
@@ -3750,6 +3757,7 @@ static int lan78xx_probe(struct usb_interface *intf,
/* MTU range: 68 - 9000 */
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f7d117d80cfb..8783e2ab3ec0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -693,7 +693,7 @@ static void intr_callback(struct urb *urb)
"can't resubmit interrupt urb, %d\n", res);
}
-static void pegasus_tx_timeout(struct net_device *net)
+static void pegasus_tx_timeout(struct net_device *net, unsigned int txqueue)
{
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4196c0e32740..9485c8d1de8a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c5ebf35d2488..e8cd8c05b156 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
#define NETNEXT_VERSION "11"
/* Information for net */
-#define NET_VERSION "10"
+#define NET_VERSION "11"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_BOOT_CTRL 0xe004
+#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
@@ -95,6 +96,7 @@
#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
+#define PLA_CONFIG6 0xe90a /* CONFIG6 */
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
@@ -107,6 +109,7 @@
#define PLA_BP_EN 0xfc38
#define USB_USB2PHY 0xb41e
+#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
@@ -300,6 +303,9 @@
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN BIT(0)
+
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
@@ -312,6 +318,7 @@
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
+#define TEST_IO_OFF BIT(4)
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
@@ -324,6 +331,7 @@
#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN BIT(14)
#define PKT_AVAIL_SPDWN_EN 0x0100
#define SUSPEND_SPDWN_EN 0x0004
#define U1U2_SPDWN_EN 0x0002
@@ -354,6 +362,9 @@
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN BIT(7)
+
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
@@ -365,13 +376,18 @@
#define DEBUG_LTSSM 0x0082
/* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK BIT(15)
#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
+#define POLL_LINK_CHG BIT(0)
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG BIT(1)
+
/* USB_SSPHYLINK2 */
#define pwd_dn_scale_mask 0x3ffe
#define pwd_dn_scale(x) ((x) << 1)
@@ -1897,8 +1913,8 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
{
if (skb_shinfo(skb)->gso_size) {
netdev_features_t features = tp->netdev->features;
+ struct sk_buff *segs, *seg, *next;
struct sk_buff_head seg_list;
- struct sk_buff *segs, *nskb;
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
@@ -1907,12 +1923,10 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
__skb_queue_head_init(&seg_list);
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- __skb_queue_tail(&seg_list, nskb);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ __skb_queue_tail(&seg_list, seg);
+ }
skb_queue_splice(&seg_list, list);
dev_kfree_skb(skb);
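
skb_list_walk_safe() is the iterator used in both the tap and r8152 conversions: it latches the next pointer before the loop body runs, so the current skb may be unlinked, queued elsewhere or freed. A minimal sketch, where consume_one() stands in for any per-segment handler (hypothetical name):

struct sk_buff *seg, *next;

skb_list_walk_safe(segs, seg, next) {
	skb_mark_not_on_list(seg);	/* clears seg->next */
	consume_one(seg);		/* may queue or free seg */
}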
@@ -2507,7 +2521,7 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
}
}
-static void rtl8152_tx_timeout(struct net_device *netdev)
+static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -2863,6 +2877,17 @@ static int rtl8153_enable(struct r8152 *tp)
r8153_set_rx_early_timeout(tp);
r8153_set_rx_early_size(tp);
+ if (tp->version == RTL_VER_09) {
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data &= ~FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ usleep_range(1000, 2000);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ }
+
return rtl_enable(tp);
}
@@ -3376,8 +3401,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
}
@@ -4675,7 +4700,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
- r8153_u2p3en(tp, true);
set_bit(PHY_RESET, &tp->flags);
}
@@ -4954,6 +4978,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -4961,6 +4987,19 @@ static void rtl8153_up(struct r8152 *tp)
r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+ ocp_data &= ~DELAY_PHY_PWR_CHG;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
r8153_aldps_en(tp, true);
switch (tp->version) {
@@ -4979,11 +5018,17 @@ static void rtl8153_up(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data &= ~LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
@@ -4994,6 +5039,8 @@ static void rtl8153_down(struct r8152 *tp)
static void rtl8153b_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -5004,18 +5051,29 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_first_init(tp);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153_aldps_en(tp, true);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
static void rtl8153b_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data |= PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153b_power_cut_en(tp, false);
@@ -5387,6 +5445,16 @@ static void r8153_init(struct r8152 *tp)
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+ r8153_queue_wake(tp, false);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5484,19 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
r8153_power_cut_en(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
r8153_u1u2en(tp, true);
r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5561,17 @@ static void r8153b_init(struct r8152 *tp)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153b_u1u2en(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
@@ -5492,6 +5579,19 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= MAC_CLK_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ if (tp->version == RTL_VER_09) {
+ /* Disable Test IO for 32QFN */
+ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= TEST_IO_OFF;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+ }
+ }
+
set_bit(GREEN_ETHERNET, &tp->flags);
/* rx aggregation */
@@ -6597,6 +6697,9 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENODEV;
}
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -6704,6 +6807,11 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+ else
+ tp->saved_wolopts = __rtl_get_wol(tp);
+
tp->rtl_ops.init(tp);
#if IS_BUILTIN(CONFIG_USB_RTL8152)
/* Retry in case request_firmware() is not ready yet. */
@@ -6721,10 +6829,6 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
- if (!rtl_can_wakeup(tp))
- __rtl_set_wol(tp, 0);
-
- tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 13e51ccf0214..e7c630d37589 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -655,7 +655,7 @@ static void disable_net_traffic(rtl8150_t * dev)
set_registers(dev, CR, 1, &cr);
}
-static void rtl8150_tx_timeout(struct net_device *netdev)
+static void rtl8150_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
rtl8150_t *dev = netdev_priv(netdev);
dev_warn(&netdev->dev, "Tx timeout.\n");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9ce6d30576dd..5ec97def3513 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1293,7 +1293,7 @@ static void tx_complete (struct urb *urb)
/*-------------------------------------------------------------------------*/
-void usbnet_tx_timeout (struct net_device *net)
+void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
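
Every .ndo_tx_timeout conversion in this patch has the same shape: the core now hands the handler the index of the queue whose watchdog fired, instead of leaving drivers to scan all queues. A hedged sketch of a multiqueue handler using it; my_priv and my_reset_ring are hypothetical:

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct my_priv *priv = netdev_priv(dev);

	netdev_warn(dev, "TX queue %u timed out\n", txqueue);
	my_reset_ring(&priv->ring[txqueue]);	/* reset only the stuck queue */
}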
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a552df37a347..8cdc4415fa70 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -377,6 +377,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
unsigned int max_len;
struct veth_rq *rq;
+ rcu_read_lock();
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
goto drop;
@@ -418,11 +419,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops))
+ if (likely(!drops)) {
+ rcu_read_unlock();
return n;
+ }
ret = n - drops;
drop:
+ rcu_read_unlock();
atomic64_add(drops, &priv->dropped);
return ret;
@@ -769,7 +773,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (xdp_xmit & VETH_XDP_TX)
veth_xdp_flush(rq->dev, &bq);
if (xdp_xmit & VETH_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
xdp_clear_return_frame_no_direct();
return done;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d7d5434cc5d..2fe7a3188282 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -501,7 +501,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
- xdp_prog = rcu_dereference(rq->xdp_prog);
+ xdp_prog = rcu_access_pointer(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
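
rcu_access_pointer() is the right primitive here because only the pointer's existence is tested, never its contents, so no read-side critical section is required. The idiom, in isolation:

/* NULL-check an RCU-protected pointer without dereferencing it */
if (!rcu_access_pointer(rq->xdp_prog))
	return -ENXIO;	/* XDP not attached on this queue */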
@@ -1432,7 +1432,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
virtqueue_napi_complete(napi, rq->vq, received);
if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_sq(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 216acf37ca7c..18f152fa0068 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3198,7 +3198,7 @@ vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
static void
-vmxnet3_tx_timeout(struct net_device *netdev)
+vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 0a38c76688ab..1e4b9ba70983 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -555,10 +555,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
}
if (VMXNET3_VERSION_GE_3(adapter)) {
- if (param->rx_mini_pending < 0 ||
- param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) {
+ if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
return -EINVAL;
- }
} else if (param->rx_mini_pending != 0) {
return -EINVAL;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1c5159dcc720..d3b08b76b1ec 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1060,6 +1060,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
return err;
} else {
union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
+
if (remote->sa.sa_family == AF_INET) {
ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
ip->sa.sa_family = AF_INET;
@@ -1696,7 +1697,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
!net_eq(vxlan->net, dev_net(vxlan->dev))))
- goto drop;
+ goto drop;
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
@@ -4128,30 +4129,30 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
- !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
- !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
nla_put_u8(skb, IFLA_VXLAN_RSC,
!!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
!!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dd1a147f2971..4530840e15ef 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -315,7 +315,8 @@ config DSCC4_PCI_RST
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
- depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
+ depends on ARCH_IXP4XX
help
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index af539151d663..5d6532ad6b78 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -268,7 +268,7 @@ static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity);
static int cosa_net_open(struct net_device *d);
static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d);
+static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
@@ -670,7 +670,7 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void cosa_net_timeout(struct net_device *dev)
+static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct channel_data *chan = dev_to_chan(dev);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1901ec7948d8..7916efce7188 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2239,7 +2239,7 @@ fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parit
}
static void
-fst_tx_timeout(struct net_device *dev)
+fst_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fst_port_info *port;
struct fst_card_info *card;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index ca0f3be2b6bf..3998cac49d7f 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = {
},
};
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
@@ -635,11 +635,9 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
struct net_device *dev = priv->ndev;
struct ucc_fast_private *uccf;
- struct ucc_tdm_info *ut_info;
u32 ucce;
u32 uccm;
- ut_info = priv->ut_info;
uccf = priv->uccf;
ucce = ioread32be(uccf->p_ucce);
@@ -872,7 +870,6 @@ static void resume_clk_config(struct ucc_hdlc_private *priv)
static int uhdlc_suspend(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
- struct ucc_tdm_info *ut_info;
struct ucc_fast __iomem *uf_regs;
if (!priv)
@@ -884,7 +881,6 @@ static int uhdlc_suspend(struct device *dev)
netif_device_detach(priv->ndev);
napi_disable(&priv->napi);
- ut_info = priv->ut_info;
uf_regs = priv->uf_regs;
/* backup gumr guemr*/
@@ -917,7 +913,7 @@ static int uhdlc_resume(struct device *dev)
struct ucc_fast __iomem *uf_regs;
struct ucc_fast_private *uccf;
struct ucc_fast_info *uf_info;
- int ret, i;
+ int i;
u32 cecr_subblock;
u16 bd_status;
@@ -962,16 +958,16 @@ static int uhdlc_resume(struct device *dev)
/* Write to QE CECR, UCCx channel to Stop Transmission */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode */
iowrite32be(0, &uf_regs->upsmr);
/* init parameter base */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
qe_muram_addr(priv->ucc_pram_offset);
@@ -1039,7 +1035,7 @@ static const struct dev_pm_ops uhdlc_pm_ops = {
#define HDLC_PM_OPS NULL
#endif
-static void uhdlc_tx_timeout(struct net_device *ndev)
+static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netdev_err(ndev, "%s\n", __func__);
}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index a030f5aa6b95..d8cba3625c18 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -75,7 +75,7 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
{
struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
- printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+ netdev_dbg(dev, "%s called\n", __func__);
#endif
skb_push(skb, sizeof(struct hdlc_header));
@@ -101,7 +101,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
if (!skb) {
- netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
+ netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
return;
}
skb_reserve(skb, 4);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 5643675ff724..c84536b03aa8 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -21,8 +21,17 @@
#include <linux/skbuff.h>
#include <net/x25device.h>
+struct x25_state {
+ x25_hdlc_proto settings;
+};
+
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+static struct x25_state *state(hdlc_device *hdlc)
+{
+ return hdlc->state;
+}
+
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
@@ -62,11 +71,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
- skb_push(skb, 1);
-
if (skb_cow(skb, 1))
return NET_RX_DROP;
+ skb_push(skb, 1);
+ skb_reset_network_header(skb);
+
ptr = skb->data;
*ptr = X25_IFACE_DATA;
@@ -79,6 +89,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ skb_reset_network_header(skb);
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ if (dev_nit_active(dev))
+ dev_queue_xmit_nit(skb, dev);
+
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
@@ -93,6 +110,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
+ skb_reset_network_header(skb);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -131,7 +149,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
static int x25_open(struct net_device *dev)
{
- int result;
static const struct lapb_register_struct cb = {
.connect_confirmation = x25_connected,
.connect_indication = x25_connected,
@@ -140,10 +157,33 @@ static int x25_open(struct net_device *dev)
.data_indication = x25_data_indication,
.data_transmit = x25_data_transmit,
};
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct lapb_parms_struct params;
+ int result;
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return result;
+
+ result = lapb_getparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
+ if (state(hdlc)->settings.dce)
+ params.mode = params.mode | LAPB_DCE;
+
+ if (state(hdlc)->settings.modulo == 128)
+ params.mode = params.mode | LAPB_EXTENDED;
+
+ params.window = state(hdlc)->settings.window;
+ params.t1 = state(hdlc)->settings.t1;
+ params.t2 = state(hdlc)->settings.t2;
+ params.n2 = state(hdlc)->settings.n2;
+
+ result = lapb_setparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
return 0;
}
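
With the LAPB parameters now carried in the protocol state, userspace configures them through the existing SIOCWANDEV plumbing. A hedged userspace sketch; the values follow the validation ranges enforced further below, and exact header requirements may vary by libc:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/sockios.h>

static int set_x25(int fd, const char *ifname)
{
	struct ifreq ifr;
	x25_hdlc_proto p = {
		.dce = 0, .modulo = 8, .window = 7,
		.t1 = 3, .t2 = 1, .n2 = 10,	/* the backward-compat defaults */
	};

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_settings.type = IF_PROTO_X25;
	ifr.ifr_settings.size = sizeof(p);
	ifr.ifr_settings.ifs_ifsu.x25 = &p;
	return ioctl(fd, SIOCWANDEV, &ifr);
}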
@@ -186,7 +226,10 @@ static struct hdlc_proto proto = {
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
+ x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
+ const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
+ x25_hdlc_proto new_settings;
int result;
switch (ifr->ifr_settings.type) {
@@ -194,7 +237,13 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_X25;
- return 0; /* return protocol only, no settable parameters */
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(x25_s, &state(hdlc)->settings, size))
+ return -EFAULT;
+ return 0;
case IF_PROTO_X25:
if (!capable(CAP_NET_ADMIN))
@@ -203,12 +252,46 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev->flags & IFF_UP)
return -EBUSY;
+ /* backward compatibility */
+ if (ifr->ifr_settings.size == 0) {
+ new_settings.dce = 0;
+ new_settings.modulo = 8;
+ new_settings.window = 7;
+ new_settings.t1 = 3;
+ new_settings.t2 = 1;
+ new_settings.n2 = 10;
+ } else {
+ if (copy_from_user(&new_settings, x25_s, size))
+ return -EFAULT;
+
+ if ((new_settings.dce != 0 &&
+ new_settings.dce != 1) ||
+ (new_settings.modulo != 8 &&
+ new_settings.modulo != 128) ||
+ new_settings.window < 1 ||
+ (new_settings.modulo == 8 &&
+ new_settings.window > 7) ||
+ (new_settings.modulo == 128 &&
+ new_settings.window > 127) ||
+ new_settings.t1 < 1 ||
+ new_settings.t1 > 255 ||
+ new_settings.t2 < 1 ||
+ new_settings.t2 > 255 ||
+ new_settings.n2 < 1 ||
+ new_settings.n2 > 255)
+ return -EINVAL;
+ }
+
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto, 0)))
+ if ((result = attach_hdlc_protocol(dev, &proto,
+ sizeof(struct x25_state))))
return result;
+
+ memcpy(&state(hdlc)->settings, &new_settings, size);
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index ea6ee6a608ce..7c5cf77e9ef1 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/wan_ixp4xx_hss.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/soc/ixp4xx/npe.h>
@@ -258,7 +259,7 @@ struct port {
struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
- u32 desc_tab_phys;
+ dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@@ -858,7 +859,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
@@ -1182,14 +1183,14 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
}
}
-static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
+static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c,
u32 *best, u32 *best_diff, u32 *reg)
{
/* a is 10-bit, b is 10-bit, c is 12-bit */
u64 new_rate;
u32 new_diff;
- new_rate = ixp4xx_timer_freq * (u64)(c + 1);
+ new_rate = timer_freq * (u64)(c + 1);
do_div(new_rate, a * (c + 1) + b + 1);
new_diff = abs((u32)new_rate - rate);
@@ -1201,40 +1202,43 @@ static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
return new_diff;
}
-static void find_best_clock(u32 rate, u32 *best, u32 *reg)
+static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
{
u32 a, b, diff = 0xFFFFFFFF;
- a = ixp4xx_timer_freq / rate;
+ a = timer_freq / rate;
if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
- check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
+ check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);
return;
}
if (a == 0) { /* > 66.666 MHz */
a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
- rate = ixp4xx_timer_freq;
+ rate = timer_freq;
}
- if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
- check_clock(rate, a - 1, 1, 1, best, &diff, reg);
+ if (rate * a == timer_freq) { /* don't divide by 0 later */
+ check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);
return;
}
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
- do_div(c, ixp4xx_timer_freq - rate * a);
+ do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
if (b == 0 && /* also try a bit higher rate */
- !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
+ !check_clock(timer_freq, rate, a - 1, 1, 1, best,
+ &diff, reg))
return;
- check_clock(rate, a, b, 0xFFF, best, &diff, reg);
+ check_clock(timer_freq, rate, a, b, 0xFFF, best,
+ &diff, reg);
return;
}
- if (!check_clock(rate, a, b, c, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg))
return;
- if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,
+ reg))
return;
}
}
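
check_clock() above scores one candidate against the generator formula rate = timer_freq * (c + 1) / (a * (c + 1) + b + 1), with a and b 10-bit and c 12-bit. The single evaluation, extracted for clarity (do_div() comes from <asm/div64.h>):

static u32 gen_rate(u32 timer_freq, u32 a, u32 b, u32 c)
{
	u64 rate = (u64)timer_freq * (c + 1);

	/* divisor fits 32 bits: a <= 0x3FF, b <= 0x3FF, c <= 0xFFF */
	do_div(rate, a * (c + 1) + b + 1);
	return (u32)rate;
}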
@@ -1285,8 +1289,9 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
port->clock_type = clk; /* Update settings */
if (clk == CLOCK_INT)
- find_best_clock(new_line.clock_rate, &port->clock_rate,
- &port->clock_reg);
+ find_best_clock(port->plat->timer_freq,
+ new_line.clock_rate,
+ &port->clock_rate, &port->clock_reg);
else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 0f1217b506ad..e30d91a38cfb 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
- list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+ list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 0e6a51525d91..a20f467ca48a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -99,7 +99,7 @@ static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev);
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
/*
* linux reserves 16 device specific IOCTLs. We call them
@@ -2044,7 +2044,7 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
-static void lmc_driver_timeout(struct net_device *dev)
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
{
lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 34e94ee806d6..23f93f1c815d 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
/* set up PLX mapping */
plx_phy = pci_resource_start(pdev, 0);
- card->plx = ioremap_nocache(plx_phy, 0x70);
+ card->plx = ioremap(plx_phy, 0x70);
if (!card->plx) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
@@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
PCI_DMA_FROMDEVICE);
}
- mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+ mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 914be5847386..69773d228ec1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -276,7 +276,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
sl->xhead += actual;
}
-static void x25_asy_timeout(struct net_device *dev)
+static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue)
{
struct x25_asy *sl = netdev_priv(dev);
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a5db3c06b646..a7fcbceb6e6b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -380,7 +380,7 @@ drop:
static
-void i2400m_tx_timeout(struct net_device *net_dev)
+void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
/*
* We might want to kick the device
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
new file mode 100644
index 000000000000..fc52b2cb500b
--- /dev/null
+++ b/drivers/net/wireguard/Makefile
@@ -0,0 +1,18 @@
+ccflags-y := -O3
+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
+wireguard-y := main.o
+wireguard-y += noise.o
+wireguard-y += device.o
+wireguard-y += peer.o
+wireguard-y += timers.o
+wireguard-y += queueing.o
+wireguard-y += send.o
+wireguard-y += receive.o
+wireguard-y += socket.o
+wireguard-y += peerlookup.o
+wireguard-y += allowedips.o
+wireguard-y += ratelimiter.o
+wireguard-y += cookie.o
+wireguard-y += netlink.o
+obj-$(CONFIG_WIREGUARD) := wireguard.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
new file mode 100644
index 000000000000..121d9ea0f135
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "allowedips.h"
+#include "peer.h"
+
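+/* Keys are stored and compared as native-endian u32/u64 words so that
+ * fls()/fls64() and the trie's bit tests operate on whole registers;
+ * swap_endian() converts between that representation and network byte
+ * order, and is its own inverse, so it also converts keys back out.
+ */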
+static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+{
+ if (bits == 32) {
+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ } else if (bits == 128) {
+ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+ }
+}
+
+static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
+ u8 cidr, u8 bits)
+{
+ node->cidr = cidr;
+ node->bit_at_a = cidr / 8U;
+#ifdef __LITTLE_ENDIAN
+ node->bit_at_a ^= (bits / 8U - 1U) % 8U;
+#endif
+ node->bit_at_b = 7U - (cidr % 8U);
+ node->bitlen = bits;
+ memcpy(node->bits, src, bits / 8U);
+}
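+
+/* CHOOSE_NODE() descends one level of the trie: it extracts the key bit
+ * at position node->cidr, counting from the most significant bit of the
+ * native-endian key, and selects child bit[0] or bit[1] accordingly.
+ * bit_at_a is that bit's byte offset (adjusted on little-endian for the
+ * word-swapped key layout) and bit_at_b is its position within the byte.
+ */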
+#define CHOOSE_NODE(parent, key) \
+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static void push_rcu(struct allowedips_node **stack,
+ struct allowedips_node __rcu *p, unsigned int *len)
+{
+ if (rcu_access_pointer(p)) {
+ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ stack[(*len)++] = rcu_dereference_raw(p);
+ }
+}
+
+static void root_free_rcu(struct rcu_head *rcu)
+{
+ struct allowedips_node *node, *stack[128] = {
+ container_of(rcu, struct allowedips_node, rcu) };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ kfree(node);
+ }
+}
+
+static void root_remove_peer_lists(struct allowedips_node *root)
+{
+ struct allowedips_node *node, *stack[128] = { root };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ if (rcu_access_pointer(node->peer))
+ list_del(&node->peer_list);
+ }
+}
+
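+/* Iterative post-order walk: a node is only examined once both of its
+ * children have been visited, so subtrees freed below are never
+ * re-entered. When the node whose peer was removed has at most one
+ * child, the surviving pointer is spliced into the parent's slot and
+ * the node itself is freed via RCU.
+ */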
+static void walk_remove_by_peer(struct allowedips_node __rcu **top,
+ struct wg_peer *peer, struct mutex *lock)
+{
+#define REF(p) rcu_access_pointer(p)
+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
+#define PUSH(p) ({ \
+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
+ stack[len++] = p; \
+ })
+
+ struct allowedips_node __rcu **stack[128], **nptr;
+ struct allowedips_node *node, *prev;
+ unsigned int len;
+
+ if (unlikely(!peer || !REF(*top)))
+ return;
+
+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
+ nptr = stack[len - 1];
+ node = DEREF(nptr);
+ if (!node) {
+ --len;
+ continue;
+ }
+ if (!prev || REF(prev->bit[0]) == node ||
+ REF(prev->bit[1]) == node) {
+ if (REF(node->bit[0]))
+ PUSH(&node->bit[0]);
+ else if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else if (REF(node->bit[0]) == prev) {
+ if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else {
+ if (rcu_dereference_protected(node->peer,
+ lockdep_is_held(lock)) == peer) {
+ RCU_INIT_POINTER(node->peer, NULL);
+ list_del_init(&node->peer_list);
+ if (!node->bit[0] || !node->bit[1]) {
+ rcu_assign_pointer(*nptr, DEREF(
+ &node->bit[!REF(node->bit[0])]));
+ kfree_rcu(node, rcu);
+ node = DEREF(nptr);
+ }
+ }
+ --len;
+ }
+ }
+
+#undef REF
+#undef DEREF
+#undef PUSH
+}
+
+static unsigned int fls128(u64 a, u64 b)
+{
+ return a ? fls64(a) + 64U : fls64(b);
+}
+
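+/* Returns the length of the common prefix of node->bits and @key: the
+ * highest differing bit is located with fls()/fls128(), so identical
+ * keys yield the full width (fls(0) == 0).
+ */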
+static u8 common_bits(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ if (bits == 32)
+ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key);
+ else if (bits == 128)
+ return 128U - fls128(
+ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0],
+ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]);
+ return 0;
+}
+
+static bool prefix_matches(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ /* This could be much faster if it actually just compared the common
+ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and
+ * the rest, but it turns out that common_bits is already super fast on
+ * modern processors, even taking into account the unfortunate bswap.
+ * So, we just inline it like this instead.
+ */
+ return common_bits(node, key, bits) >= node->cidr;
+}
+
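+/* Walks from the root, remembering the deepest matching node that has a
+ * peer attached. Since a child's cidr is always longer than its
+ * parent's, the last such node is the longest-prefix match.
+ */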
+static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
+ const u8 *key)
+{
+ struct allowedips_node *node = trie, *found = NULL;
+
+ while (node && prefix_matches(node, key, bits)) {
+ if (rcu_access_pointer(node->peer))
+ found = node;
+ if (node->cidr == bits)
+ break;
+ node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ }
+ return found;
+}
+
+/* Returns a strong reference to a peer */
+static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits,
+ const void *be_ip)
+{
+ /* Aligned so it can be passed to fls/fls64 */
+ u8 ip[16] __aligned(__alignof(u64));
+ struct allowedips_node *node;
+ struct wg_peer *peer = NULL;
+
+ swap_endian(ip, be_ip, bits);
+
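+ /* wg_peer_get_maybe_zero() fails when the peer's refcount has
+ * already dropped to zero (it is mid-teardown); the walk is then
+ * simply retried until it observes the trie without that peer.
+ */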
+ rcu_read_lock_bh();
+retry:
+ node = find_node(rcu_dereference_bh(root), bits, ip);
+ if (node) {
+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
+ if (!peer)
+ goto retry;
+ }
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ u8 cidr, u8 bits, struct allowedips_node **rnode,
+ struct mutex *lock)
+{
+ struct allowedips_node *node = rcu_dereference_protected(trie,
+ lockdep_is_held(lock));
+ struct allowedips_node *parent = NULL;
+ bool exact = false;
+
+ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) {
+ parent = node;
+ if (parent->cidr == cidr) {
+ exact = true;
+ break;
+ }
+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
+ lockdep_is_held(lock));
+ }
+ *rnode = parent;
+ return exact;
+}
+
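+/* Insertion has three shapes: an empty trie takes the new node as its
+ * root; an exact cidr match (node_placement() returning true) merely
+ * repoints the existing node's peer; otherwise the new node is spliced
+ * in beside the nearest existing subtree ("down"), if necessary via a
+ * peerless glue node that holds their common prefix.
+ */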
+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node, *parent, *down, *newnode;
+
+ if (unlikely(cidr > bits || !peer))
+ return -EINVAL;
+
+ if (!rcu_access_pointer(*trie)) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+ RCU_INIT_POINTER(node->peer, peer);
+ list_add_tail(&node->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(node, key, cidr, bits);
+ rcu_assign_pointer(*trie, node);
+ return 0;
+ }
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) {
+ rcu_assign_pointer(node->peer, peer);
+ list_move_tail(&node->peer_list, &peer->allowedips_list);
+ return 0;
+ }
+
+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ if (unlikely(!newnode))
+ return -ENOMEM;
+ RCU_INIT_POINTER(newnode->peer, peer);
+ list_add_tail(&newnode->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(newnode, key, cidr, bits);
+
+ if (!node) {
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
+ } else {
+ down = rcu_dereference_protected(CHOOSE_NODE(node, key),
+ lockdep_is_held(lock));
+ if (!down) {
+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ return 0;
+ }
+ }
+ cidr = min(cidr, common_bits(down, key, bits));
+ parent = node;
+
+ if (newnode->cidr == cidr) {
+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ if (!parent)
+ rcu_assign_pointer(*trie, newnode);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
+ newnode);
+ } else {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node)) {
+ kfree(newnode);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
+ if (!parent)
+ rcu_assign_pointer(*trie, node);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
+ node);
+ }
+ return 0;
+}
+
+void wg_allowedips_init(struct allowedips *table)
+{
+ table->root4 = table->root6 = NULL;
+ table->seq = 1;
+}
+
+void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
+{
+ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
+
+ ++table->seq;
+ RCU_INIT_POINTER(table->root4, NULL);
+ RCU_INIT_POINTER(table->root6, NULL);
+ if (rcu_access_pointer(old4)) {
+ struct allowedips_node *node = rcu_dereference_protected(old4,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+ if (rcu_access_pointer(old6)) {
+ struct allowedips_node *node = rcu_dereference_protected(old6,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+}
+
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return add(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return add(&table->root6, 128, key, cidr, peer, lock);
+}
+
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock)
+{
+ ++table->seq;
+ walk_remove_by_peer(&table->root4, peer, lock);
+ walk_remove_by_peer(&table->root6, peer, lock);
+}
+
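+/* Converts a node's native-endian key back to network byte order and
+ * clears all bits beyond the cidr, yielding a canonical ip/cidr pair.
+ */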
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
+{
+ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U);
+ swap_endian(ip, node->bits, node->bitlen);
+ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes);
+ if (node->cidr)
+ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U);
+
+ *cidr = node->cidr;
+ return node->bitlen == 32 ? AF_INET : AF_INET6;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr);
+ return NULL;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr);
+ return NULL;
+}
+
+#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
new file mode 100644
index 000000000000..e5c83cafcef4
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_ALLOWEDIPS_H
+#define _WG_ALLOWEDIPS_H
+
+#include <linux/mutex.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_peer;
+
+struct allowedips_node {
+ struct wg_peer __rcu *peer;
+ struct allowedips_node __rcu *bit[2];
+ /* While it may seem scandalous that we waste space for v4,
+ * we're alloc'ing to the nearest power of 2 anyway, so this
+ * doesn't actually make a difference.
+ */
+ u8 bits[16] __aligned(__alignof(u64));
+ u8 cidr, bit_at_a, bit_at_b, bitlen;
+
+ /* Keep rarely used list at bottom to be beyond cache line. */
+ union {
+ struct list_head peer_list;
+ struct rcu_head rcu;
+ };
+};
+
+struct allowedips {
+ struct allowedips_node __rcu *root4;
+ struct allowedips_node __rcu *root6;
+ u64 seq;
+};
+
+void wg_allowedips_init(struct allowedips *table);
+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock);
+/* The ip input pointer should be __aligned(__alignof(u64)) */
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr);
+
+/* These return a strong reference to a peer: */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb);
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb);
+
+#ifdef DEBUG
+bool wg_allowedips_selftest(void);
+#endif
+
+#endif /* _WG_ALLOWEDIPS_H */
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
new file mode 100644
index 000000000000..4956f0499c19
--- /dev/null
+++ b/drivers/net/wireguard/cookie.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "cookie.h"
+#include "peer.h"
+#include "device.h"
+#include "messages.h"
+#include "ratelimiter.h"
+#include "timers.h"
+
+#include <crypto/blake2s.h>
+#include <crypto/chacha20poly1305.h>
+
+#include <net/ipv6.h>
+#include <crypto/algapi.h>
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg)
+{
+ init_rwsem(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ checker->device = wg;
+}
+
+enum { COOKIE_KEY_LABEL_LEN = 8 };
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+
+static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
+ const u8 label[COOKIE_KEY_LABEL_LEN])
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN);
+ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN);
+ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN);
+ blake2s_final(&blake, key);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker)
+{
+ if (likely(checker->device->static_identity.has_identity)) {
+ precompute_key(checker->cookie_encryption_key,
+ checker->device->static_identity.static_public,
+ cookie_key_label);
+ precompute_key(checker->message_mac1_key,
+ checker->device->static_identity.static_public,
+ mac1_key_label);
+ } else {
+ memset(checker->cookie_encryption_key, 0,
+ NOISE_SYMMETRIC_KEY_LEN);
+ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN);
+ }
+}
+
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer)
+{
+ precompute_key(peer->latest_cookie.cookie_decryption_key,
+ peer->handshake.remote_static, cookie_key_label);
+ precompute_key(peer->latest_cookie.message_mac1_key,
+ peer->handshake.remote_static, mac1_key_label);
+}
+
+void wg_cookie_init(struct cookie *cookie)
+{
+ memset(cookie, 0, sizeof(*cookie));
+ init_rwsem(&cookie->lock);
+}
+
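+/* mac1 is keyed with BLAKE2s(mac1_key_label || responder public key)
+ * and covers the message up to but not including the two trailing macs;
+ * mac2 is keyed with the most recent cookie and additionally covers
+ * mac1.
+ */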
+static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len,
+ const u8 key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac1);
+ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN);
+}
+
+static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
+ const u8 cookie[COOKIE_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac2);
+ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN);
+}
+
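+/* A cookie is a keyed BLAKE2s hash of the sender's transport source
+ * address and port, under a secret rotated every COOKIE_SECRET_MAX_AGE
+ * seconds, proving reachability at that address without the responder
+ * keeping per-flow state.
+ */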
+static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
+ struct cookie_checker *checker)
+{
+ struct blake2s_state state;
+
+ if (wg_birthdate_has_expired(checker->secret_birthdate,
+ COOKIE_SECRET_MAX_AGE)) {
+ down_write(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ up_write(&checker->secret_lock);
+ }
+
+ down_read(&checker->secret_lock);
+
+ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
+ if (skb->protocol == htons(ETH_P_IP))
+ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr,
+ sizeof(struct in_addr));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
+ blake2s_final(&state, cookie);
+
+ up_read(&checker->secret_lock);
+}
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie)
+{
+ struct message_macs *macs = (struct message_macs *)
+ (skb->data + skb->len - sizeof(*macs));
+ enum cookie_mac_state ret;
+ u8 computed_mac[COOKIE_LEN];
+ u8 cookie[COOKIE_LEN];
+
+ ret = INVALID_MAC;
+ compute_mac1(computed_mac, skb->data, skb->len,
+ checker->message_mac1_key);
+ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_BUT_NO_COOKIE;
+
+ if (!check_cookie)
+ goto out;
+
+ make_cookie(cookie, skb, checker);
+
+ compute_mac2(computed_mac, skb->data, skb->len, cookie);
+ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev)))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE;
+
+out:
+ return ret;
+}
+
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)message + len - sizeof(*macs));
+
+ down_write(&peer->latest_cookie.lock);
+ compute_mac1(macs->mac1, message, len,
+ peer->latest_cookie.message_mac1_key);
+ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN);
+ peer->latest_cookie.have_sent_mac1 = true;
+ up_write(&peer->latest_cookie.lock);
+
+ down_read(&peer->latest_cookie.lock);
+ if (peer->latest_cookie.is_valid &&
+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate,
+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
+ compute_mac2(macs->mac2, message, len,
+ peer->latest_cookie.cookie);
+ else
+ memset(macs->mac2, 0, COOKIE_LEN);
+ up_read(&peer->latest_cookie.lock);
+}
+
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)skb->data + skb->len - sizeof(*macs));
+ u8 cookie[COOKIE_LEN];
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE);
+ dst->receiver_index = index;
+ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN);
+
+ make_cookie(cookie, skb, checker);
+ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN,
+ macs->mac1, COOKIE_LEN, dst->nonce,
+ checker->cookie_encryption_key);
+}
+
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL;
+ u8 cookie[COOKIE_LEN];
+ bool ret;
+
+ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable,
+ INDEX_HASHTABLE_HANDSHAKE |
+ INDEX_HASHTABLE_KEYPAIR,
+ src->receiver_index, &peer)))
+ return;
+
+ down_read(&peer->latest_cookie.lock);
+ if (unlikely(!peer->latest_cookie.have_sent_mac1)) {
+ up_read(&peer->latest_cookie.lock);
+ goto out;
+ }
+ ret = xchacha20poly1305_decrypt(
+ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie),
+ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce,
+ peer->latest_cookie.cookie_decryption_key);
+ up_read(&peer->latest_cookie.lock);
+
+ if (ret) {
+ down_write(&peer->latest_cookie.lock);
+ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN);
+ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns();
+ peer->latest_cookie.is_valid = true;
+ peer->latest_cookie.have_sent_mac1 = false;
+ up_write(&peer->latest_cookie.lock);
+ } else {
+ net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n",
+ wg->dev->name);
+ }
+
+out:
+ wg_peer_put(peer);
+}
diff --git a/drivers/net/wireguard/cookie.h b/drivers/net/wireguard/cookie.h
new file mode 100644
index 000000000000..c4bd61ca03f2
--- /dev/null
+++ b/drivers/net/wireguard/cookie.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_COOKIE_H
+#define _WG_COOKIE_H
+
+#include "messages.h"
+#include <linux/rwsem.h>
+
+struct wg_peer;
+
+struct cookie_checker {
+ u8 secret[NOISE_HASH_LEN];
+ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ u64 secret_birthdate;
+ struct rw_semaphore secret_lock;
+ struct wg_device *device;
+};
+
+struct cookie {
+ u64 birthdate;
+ bool is_valid;
+ u8 cookie[COOKIE_LEN];
+ bool have_sent_mac1;
+ u8 last_mac1_sent[COOKIE_LEN];
+ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ struct rw_semaphore lock;
+};
+
+enum cookie_mac_state {
+ INVALID_MAC,
+ VALID_MAC_BUT_NO_COOKIE,
+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED,
+ VALID_MAC_WITH_COOKIE
+};
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg);
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker);
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer);
+void wg_cookie_init(struct cookie *cookie);
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie);
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer);
+
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker);
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg);
+
+#endif /* _WG_COOKIE_H */
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
new file mode 100644
index 000000000000..16b19824b9ad
--- /dev/null
+++ b/drivers/net/wireguard/device.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "socket.h"
+#include "timers.h"
+#include "device.h"
+#include "ratelimiter.h"
+#include "peer.h"
+#include "messages.h"
+
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmp.h>
+#include <linux/suspend.h>
+#include <net/icmp.h>
+#include <net/rtnetlink.h>
+#include <net/ip_tunnels.h>
+#include <net/addrconf.h>
+
+static LIST_HEAD(device_list);
+
+static int wg_open(struct net_device *dev)
+{
+ struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
+ struct inet6_dev *dev_v6 = __in6_dev_get(dev);
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+ int ret;
+
+ if (dev_v4) {
+ /* At some point we might put this check near the ip_rt_send_
+ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
+ * to the current secpath check.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
+ }
+ if (dev_v6)
+ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
+
+ ret = wg_socket_init(wg, wg->incoming_port);
+ if (ret < 0)
+ return ret;
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_send_staged_packets(peer);
+ if (peer->persistent_keepalive_interval)
+ wg_packet_send_keepalive(peer);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ /* If the machine is constantly suspending and resuming, as part of
+ * its normal operation rather than as a somewhat rare event, then we
+ * don't actually want to clear keys.
+ */
+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
+ return 0;
+
+ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
+ return 0;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ del_timer(&peer->timer_zero_key_material);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ rcu_barrier();
+ return 0;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = wg_pm_notification
+};
+#endif
+
+static int wg_stop(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_purge_staged_packets(peer);
+ wg_timers_stop(peer);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ skb_queue_purge(&wg->incoming_handshakes);
+ wg_socket_reinit(wg, NULL, NULL);
+ return 0;
+}
+
+static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct sk_buff_head packets;
+ struct wg_peer *peer;
+ struct sk_buff *next;
+ sa_family_t family;
+ u32 mtu;
+ int ret;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+ ret = -EPROTONOSUPPORT;
+ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
+ goto err;
+ }
+
+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
+ if (unlikely(!peer)) {
+ ret = -ENOKEY;
+ if (skb->protocol == htons(ETH_P_IP))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
+ dev->name, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
+ dev->name, &ipv6_hdr(skb)->daddr);
+ goto err;
+ }
+
+ family = READ_ONCE(peer->endpoint.addr.sa_family);
+ if (unlikely(family != AF_INET && family != AF_INET6)) {
+ ret = -EDESTADDRREQ;
+ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
+ dev->name, peer->internal_id);
+ goto err_peer;
+ }
+
+ mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+
+ __skb_queue_head_init(&packets);
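+ /* GSO superpackets cannot be encrypted as a single unit, so they
+ * are segmented in software here and each segment staged on its own.
+ */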
+ if (!skb_is_gso(skb)) {
+ skb_mark_not_on_list(skb);
+ } else {
+ struct sk_buff *segs = skb_gso_segment(skb, 0);
+
+ if (unlikely(IS_ERR(segs))) {
+ ret = PTR_ERR(segs);
+ goto err_peer;
+ }
+ dev_kfree_skb(skb);
+ skb = segs;
+ }
+
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ continue;
+
+ /* We only need to keep the original dst around for icmp,
+ * so at this point we're in a position to drop it.
+ */
+ skb_dst_drop(skb);
+
+ PACKET_CB(skb)->mtu = mtu;
+
+ __skb_queue_tail(&packets, skb);
+ }
+
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ /* If the queue is getting too big, we start removing the oldest packets
+ * until it's small again. We do this before adding the new packet, so
+ * we don't remove GSO segments that are in excess.
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+ ++dev->stats.tx_dropped;
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ wg_packet_send_staged_packets(peer);
+
+ wg_peer_put(peer);
+ return NETDEV_TX_OK;
+
+err_peer:
+ wg_peer_put(peer);
+err:
+ ++dev->stats.tx_errors;
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ kfree_skb(skb);
+ return ret;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = wg_open,
+ .ndo_stop = wg_stop,
+ .ndo_start_xmit = wg_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64
+};
+
+static void wg_destruct(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+
+ rtnl_lock();
+ list_del(&wg->device_list);
+ rtnl_unlock();
+ mutex_lock(&wg->device_update_lock);
+ wg->incoming_port = 0;
+ wg_socket_reinit(wg, NULL, NULL);
+ /* The final references are cleared in the below calls to destroy_workqueue. */
+ wg_peer_remove_all(wg);
+ destroy_workqueue(wg->handshake_receive_wq);
+ destroy_workqueue(wg->handshake_send_wq);
+ destroy_workqueue(wg->packet_crypt_wq);
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+ rcu_barrier(); /* Wait for all the peers to be actually freed. */
+ wg_ratelimiter_uninit();
+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
+ skb_queue_purge(&wg->incoming_handshakes);
+ free_percpu(dev->tstats);
+ free_percpu(wg->incoming_handshakes_worker);
+ if (wg->have_creating_net_ref)
+ put_net(wg->creating_net);
+ kvfree(wg->index_hashtable);
+ kvfree(wg->peer_hashtable);
+ mutex_unlock(&wg->device_update_lock);
+
+ pr_debug("%s: Interface deleted\n", dev->name);
+ free_netdev(dev);
+}
+
+static const struct device_type device_type = { .name = KBUILD_MODNAME };
+
+static void wg_setup(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+
+ dev->netdev_ops = &netdev_ops;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
+ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= WG_NETDEV_FEATURES;
+ dev->hw_features |= WG_NETDEV_FEATURES;
+ dev->hw_enc_features |= WG_NETDEV_FEATURES;
+ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
+ sizeof(struct udphdr) -
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
+
+ SET_NETDEV_DEVTYPE(dev, &device_type);
+
+ /* We need to keep the dst around in case of icmp replies. */
+ netif_keep_dst(dev);
+
+ memset(wg, 0, sizeof(*wg));
+ wg->dev = dev;
+}
+
+static int wg_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ int ret = -ENOMEM;
+
+ wg->creating_net = src_net;
+ init_rwsem(&wg->static_identity.lock);
+ mutex_init(&wg->socket_update_lock);
+ mutex_init(&wg->device_update_lock);
+ skb_queue_head_init(&wg->incoming_handshakes);
+ wg_allowedips_init(&wg->peer_allowedips);
+ wg_cookie_checker_init(&wg->cookie_checker, wg);
+ INIT_LIST_HEAD(&wg->peer_list);
+ wg->device_update_gen = 1;
+
+ wg->peer_hashtable = wg_pubkey_hashtable_alloc();
+ if (!wg->peer_hashtable)
+ return ret;
+
+ wg->index_hashtable = wg_index_hashtable_alloc();
+ if (!wg->index_hashtable)
+ goto err_free_peer_hashtable;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ goto err_free_index_hashtable;
+
+ wg->incoming_handshakes_worker =
+ wg_packet_percpu_multicore_worker_alloc(
+ wg_packet_handshake_receive_worker, wg);
+ if (!wg->incoming_handshakes_worker)
+ goto err_free_tstats;
+
+ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_receive_wq)
+ goto err_free_incoming_handshakes;
+
+ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
+ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_send_wq)
+ goto err_destroy_handshake_receive;
+
+ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ if (!wg->packet_crypt_wq)
+ goto err_destroy_handshake_send;
+
+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_destroy_packet_crypt;
+
+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_free_encrypt_queue;
+
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_decrypt_queue;
+
+ ret = register_netdevice(dev);
+ if (ret < 0)
+ goto err_uninit_ratelimiter;
+
+ list_add(&wg->device_list, &device_list);
+
+ /* We wait until the end to assign priv_destructor, so that
+ * register_netdevice doesn't call it for us if it fails.
+ */
+ dev->priv_destructor = wg_destruct;
+
+ pr_debug("%s: Interface created\n", dev->name);
+ return ret;
+
+err_uninit_ratelimiter:
+ wg_ratelimiter_uninit();
+err_free_decrypt_queue:
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+err_free_encrypt_queue:
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+err_destroy_packet_crypt:
+ destroy_workqueue(wg->packet_crypt_wq);
+err_destroy_handshake_send:
+ destroy_workqueue(wg->handshake_send_wq);
+err_destroy_handshake_receive:
+ destroy_workqueue(wg->handshake_receive_wq);
+err_free_incoming_handshakes:
+ free_percpu(wg->incoming_handshakes_worker);
+err_free_tstats:
+ free_percpu(dev->tstats);
+err_free_index_hashtable:
+ kvfree(wg->index_hashtable);
+err_free_peer_hashtable:
+ kvfree(wg->peer_hashtable);
+ return ret;
+}
+
+static struct rtnl_link_ops link_ops __read_mostly = {
+ .kind = KBUILD_MODNAME,
+ .priv_size = sizeof(struct wg_device),
+ .setup = wg_setup,
+ .newlink = wg_newlink,
+};
+
+static int wg_netdevice_notification(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
+ struct wg_device *wg = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
+ return 0;
+
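+ /* Hold a reference on the creating netns only while the device
+ * lives in a different netns, so an interface sitting in its own
+ * namespace does not pin that namespace forever.
+ */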
+ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
+ put_net(wg->creating_net);
+ wg->have_creating_net_ref = false;
+ } else if (dev_net(dev) != wg->creating_net &&
+ !wg->have_creating_net_ref) {
+ wg->have_creating_net_ref = true;
+ get_net(wg->creating_net);
+ }
+ return 0;
+}
+
+static struct notifier_block netdevice_notifier = {
+ .notifier_call = wg_netdevice_notification
+};
+
+int __init wg_device_init(void)
+{
+ int ret;
+
+#ifdef CONFIG_PM_SLEEP
+ ret = register_pm_notifier(&pm_notifier);
+ if (ret)
+ return ret;
+#endif
+
+ ret = register_netdevice_notifier(&netdevice_notifier);
+ if (ret)
+ goto error_pm;
+
+ ret = rtnl_link_register(&link_ops);
+ if (ret)
+ goto error_netdevice;
+
+ return 0;
+
+error_netdevice:
+ unregister_netdevice_notifier(&netdevice_notifier);
+error_pm:
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ return ret;
+}
+
+void wg_device_uninit(void)
+{
+ rtnl_link_unregister(&link_ops);
+ unregister_netdevice_notifier(&netdevice_notifier);
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ rcu_barrier();
+}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
new file mode 100644
index 000000000000..b15a8be9d816
--- /dev/null
+++ b/drivers/net/wireguard/device.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_DEVICE_H
+#define _WG_DEVICE_H
+
+#include "noise.h"
+#include "allowedips.h"
+#include "peerlookup.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/ptr_ring.h>
+
+struct wg_device;
+
+struct multicore_worker {
+ void *ptr;
+ struct work_struct work;
+};
+
+struct crypt_queue {
+ struct ptr_ring ring;
+ union {
+ struct {
+ struct multicore_worker __percpu *worker;
+ int last_cpu;
+ };
+ struct work_struct work;
+ };
+};
+
+struct wg_device {
+ struct net_device *dev;
+ struct crypt_queue encrypt_queue, decrypt_queue;
+ struct sock __rcu *sock4, *sock6;
+ struct net *creating_net;
+ struct noise_static_identity static_identity;
+ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
+ struct workqueue_struct *packet_crypt_wq;
+ struct sk_buff_head incoming_handshakes;
+ int incoming_handshake_cpu;
+ struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct cookie_checker cookie_checker;
+ struct pubkey_hashtable *peer_hashtable;
+ struct index_hashtable *index_hashtable;
+ struct allowedips peer_allowedips;
+ struct mutex device_update_lock, socket_update_lock;
+ struct list_head device_list, peer_list;
+ unsigned int num_peers, device_update_gen;
+ u32 fwmark;
+ u16 incoming_port;
+ bool have_creating_net_ref;
+};
+
+int wg_device_init(void);
+void wg_device_uninit(void);
+
+#endif /* _WG_DEVICE_H */
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
new file mode 100644
index 000000000000..7a7d5f1a80fc
--- /dev/null
+++ b/drivers/net/wireguard/main.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "version.h"
+#include "device.h"
+#include "noise.h"
+#include "queueing.h"
+#include "ratelimiter.h"
+#include "netlink.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/genetlink.h>
+#include <net/rtnetlink.h>
+
+static int __init mod_init(void)
+{
+ int ret;
+
+#ifdef DEBUG
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ !wg_ratelimiter_selftest())
+ return -ENOTRECOVERABLE;
+#endif
+ wg_noise_init();
+
+ ret = wg_device_init();
+ if (ret < 0)
+ goto err_device;
+
+ ret = wg_genetlink_init();
+ if (ret < 0)
+ goto err_netlink;
+
+ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n");
+ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.\n");
+
+ return 0;
+
+err_netlink:
+ wg_device_uninit();
+err_device:
+ return ret;
+}
+
+static void __exit mod_exit(void)
+{
+ wg_genetlink_uninit();
+ wg_device_uninit();
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WireGuard secure network tunnel");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_VERSION(WIREGUARD_VERSION);
+MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME);
+MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME);
diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
new file mode 100644
index 000000000000..b8a7b9ce32ba
--- /dev/null
+++ b/drivers/net/wireguard/messages.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_MESSAGES_H
+#define _WG_MESSAGES_H
+
+#include <crypto/curve25519.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/blake2s.h>
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/skbuff.h>
+
+enum noise_lengths {
+ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE,
+ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE,
+ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32),
+ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE,
+ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE
+};
+
+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN)
+
+enum cookie_values {
+ COOKIE_SECRET_MAX_AGE = 2 * 60,
+ COOKIE_SECRET_LATENCY = 5,
+ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE,
+ COOKIE_LEN = 16
+};
+
+enum counter_values {
+ COUNTER_BITS_TOTAL = 2048,
+ COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
+ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
+};
+
+enum limits {
+ REKEY_AFTER_MESSAGES = 1ULL << 60,
+ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1,
+ REKEY_TIMEOUT = 5,
+ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3,
+ REKEY_AFTER_TIME = 120,
+ REJECT_AFTER_TIME = 180,
+ INITIATIONS_PER_SECOND = 50,
+ MAX_PEERS_PER_DEVICE = 1U << 20,
+ KEEPALIVE_TIMEOUT = 10,
+ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT,
+ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */
+ MAX_STAGED_PACKETS = 128,
+ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */
+};
+
+enum message_type {
+ MESSAGE_INVALID = 0,
+ MESSAGE_HANDSHAKE_INITIATION = 1,
+ MESSAGE_HANDSHAKE_RESPONSE = 2,
+ MESSAGE_HANDSHAKE_COOKIE = 3,
+ MESSAGE_DATA = 4
+};
+
+struct message_header {
+ /* The actual layout of this that we want is:
+ * u8 type
+ * u8 reserved_zero[3]
+ *
+ * But it turns out that by encoding this as little endian,
+ * we achieve the same thing, and it makes checking faster.
+ */
+ __le32 type;
+};
+
+struct message_macs {
+ u8 mac1[COOKIE_LEN];
+ u8 mac2[COOKIE_LEN];
+};
+
+struct message_handshake_initiation {
+ struct message_header header;
+ __le32 sender_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)];
+ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)];
+ struct message_macs macs;
+};
+
+struct message_handshake_response {
+ struct message_header header;
+ __le32 sender_index;
+ __le32 receiver_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_nothing[noise_encrypted_len(0)];
+ struct message_macs macs;
+};
+
+struct message_handshake_cookie {
+ struct message_header header;
+ __le32 receiver_index;
+ u8 nonce[COOKIE_NONCE_LEN];
+ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)];
+};
+
+struct message_data {
+ struct message_header header;
+ __le32 key_idx;
+ __le64 counter;
+ u8 encrypted_data[];
+};
+
+#define message_data_len(plain_len) \
+ (noise_encrypted_len(plain_len) + sizeof(struct message_data))
+
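+/* The smallest transport packet is a keepalive: an empty plaintext
+ * still carries the 16-byte data header and the 16-byte authentication
+ * tag, so MESSAGE_MINIMUM_LENGTH == message_data_len(0) == 32.
+ */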
+enum message_alignments {
+ MESSAGE_PADDING_MULTIPLE = 16,
+ MESSAGE_MINIMUM_LENGTH = message_data_len(0)
+};
+
+#define SKB_HEADER_LEN \
+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \
+ sizeof(struct udphdr) + NET_SKB_PAD)
+#define DATA_PACKET_HEAD_ROOM \
+ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4)
+
+enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ };
+
+#endif /* _WG_MESSAGES_H */
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
new file mode 100644
index 000000000000..0fdbd1c45977
--- /dev/null
+++ b/drivers/net/wireguard/netlink.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "netlink.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/if.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include <crypto/algapi.h>
+
+static struct genl_family genl_family;
+
+static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
+ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
+};
+
+static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
+ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_FLAGS] = { .type = NLA_U32 },
+ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
+};
+
+static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
+ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
+};
+
+static struct wg_device *lookup_interface(struct nlattr **attrs,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = NULL;
+
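+ /* Exactly one of ifindex or ifname must be specified. */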
+ if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
+ return ERR_PTR(-EBADR);
+ if (attrs[WGDEVICE_A_IFINDEX])
+ dev = dev_get_by_index(sock_net(skb->sk),
+ nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
+ else if (attrs[WGDEVICE_A_IFNAME])
+ dev = dev_get_by_name(sock_net(skb->sk),
+ nla_data(attrs[WGDEVICE_A_IFNAME]));
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
+ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
+ dev_put(dev);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return netdev_priv(dev);
+}
+
+static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
+ int family)
+{
+ struct nlattr *allowedip_nest;
+
+ allowedip_nest = nla_nest_start(skb, 0);
+ if (!allowedip_nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
+ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
+ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
+ nla_nest_cancel(skb, allowedip_nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, allowedip_nest);
+ return 0;
+}
+
+struct dump_ctx {
+ struct wg_device *wg;
+ struct wg_peer *next_peer;
+ u64 allowedips_seq;
+ struct allowedips_node *next_allowedip;
+};
+
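+/* The dump cursor lives directly inside netlink_callback->args, an
+ * array of longs, so struct dump_ctx must remain small enough to fit
+ * there.
+ */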
+#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)
+
+static int
+get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
+{
+ struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
+ struct allowedips_node *allowedips_node = ctx->next_allowedip;
+ bool fail;
+
+ if (!peer_nest)
+ return -EMSGSIZE;
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
+ peer->handshake.remote_static);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (!allowedips_node) {
+ const struct __kernel_timespec last_handshake = {
+ .tv_sec = peer->walltime_last_handshake.tv_sec,
+ .tv_nsec = peer->walltime_last_handshake.tv_nsec
+ };
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
+ NOISE_SYMMETRIC_KEY_LEN,
+ peer->handshake.preshared_key);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
+ sizeof(last_handshake), &last_handshake) ||
+ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
+ peer->persistent_keepalive_interval) ||
+ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
+ goto err;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr4),
+ &peer->endpoint.addr4);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr6),
+ &peer->endpoint.addr6);
+ read_unlock_bh(&peer->endpoint_lock);
+ if (fail)
+ goto err;
+ allowedips_node =
+ list_first_entry_or_null(&peer->allowedips_list,
+ struct allowedips_node, peer_list);
+ }
+ if (!allowedips_node)
+ goto no_allowedips;
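+ /* The trie's seq counter is sampled on the first pass and compared
+ * on resumed dumps; if the allowed IPs changed in between, dumping
+ * them is cut short rather than following a stale cursor.
+ */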
+ if (!ctx->allowedips_seq)
+ ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ goto no_allowedips;
+
+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+ if (!allowedips_nest)
+ goto err;
+
+ list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
+ peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family;
+
+ family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
+ if (get_allowedips(skb, ip, cidr, family)) {
+ nla_nest_end(skb, allowedips_nest);
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = allowedips_node;
+ return -EMSGSIZE;
+ }
+ }
+ nla_nest_end(skb, allowedips_nest);
+no_allowedips:
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = NULL;
+ ctx->allowedips_seq = 0;
+ return 0;
+err:
+ nla_nest_cancel(skb, peer_nest);
+ return -EMSGSIZE;
+}
+
+static int wg_get_device_start(struct netlink_callback *cb)
+{
+ struct wg_device *wg;
+
+ wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb);
+ if (IS_ERR(wg))
+ return PTR_ERR(wg);
+ DUMP_CTX(cb)->wg = wg;
+ return 0;
+}
+
+static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct wg_peer *peer, *next_peer_cursor;
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+ struct wg_device *wg = ctx->wg;
+ struct nlattr *peers_nest;
+ int ret = -EMSGSIZE;
+ bool done = true;
+ void *hdr;
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+ cb->seq = wg->device_update_gen;
+ next_peer_cursor = ctx->next_peer;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
+ if (!hdr)
+ goto out;
+ genl_dump_check_consistent(cb, hdr);
+
+ if (!ctx->next_peer) {
+ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
+ wg->incoming_port) ||
+ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
+ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
+ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
+ goto out;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity) {
+ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_private) ||
+ nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_public)) {
+ up_read(&wg->static_identity.lock);
+ goto out;
+ }
+ }
+ up_read(&wg->static_identity.lock);
+ }
+
+ peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
+ if (!peers_nest)
+ goto out;
+ ret = 0;
+ /* If the last cursor was removed via list_del_init in peer_remove, then
+ * we just treat this the same as there being no more peers left. The
+ * reason is that seq_nr should indicate to userspace that this isn't a
+ * coherent dump anyway, so they'll try again.
+ */
+ if (list_empty(&wg->peer_list) ||
+ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ nla_nest_cancel(skb, peers_nest);
+ goto out;
+ }
+ lockdep_assert_held(&wg->device_update_lock);
+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ if (get_peer(peer, skb, ctx)) {
+ done = false;
+ break;
+ }
+ next_peer_cursor = peer;
+ }
+ nla_nest_end(skb, peers_nest);
+
+out:
+ if (!ret && !done && next_peer_cursor)
+ wg_peer_get(next_peer_cursor);
+ wg_peer_put(ctx->next_peer);
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+
+ if (ret) {
+ genlmsg_cancel(skb, hdr);
+ return ret;
+ }
+ genlmsg_end(skb, hdr);
+ if (done) {
+ ctx->next_peer = NULL;
+ return 0;
+ }
+ ctx->next_peer = next_peer_cursor;
+ return skb->len;
+
+ /* At this point, we can't really deal ourselves with safely zeroing out
+ * the private key material after usage. This will need an additional API
+ * in the kernel for marking skbs as zero_on_free.
+ */
+}
+
+static int wg_get_device_done(struct netlink_callback *cb)
+{
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+
+ if (ctx->wg)
+ dev_put(ctx->wg->dev);
+ wg_peer_put(ctx->next_peer);
+ return 0;
+}
+
+static int set_port(struct wg_device *wg, u16 port)
+{
+ struct wg_peer *peer;
+
+ if (wg->incoming_port == port)
+ return 0;
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ if (!netif_running(wg->dev)) {
+ wg->incoming_port = port;
+ return 0;
+ }
+ return wg_socket_init(wg, port);
+}
+
+static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
+{
+ int ret = -EINVAL;
+ u16 family;
+ u8 cidr;
+
+ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
+ !attrs[WGALLOWEDIP_A_CIDR_MASK])
+ return ret;
+ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
+ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+
+ if (family == AF_INET && cidr <= 32 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
+ ret = wg_allowedips_insert_v4(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+ else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
+ ret = wg_allowedips_insert_v6(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+
+ return ret;
+}
+
+static int set_peer(struct wg_device *wg, struct nlattr **attrs)
+{
+ u8 *public_key = NULL, *preshared_key = NULL;
+ struct wg_peer *peer = NULL;
+ u32 flags = 0;
+ int ret;
+
+ ret = -EINVAL;
+ if (attrs[WGPEER_A_PUBLIC_KEY] &&
+ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
+ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
+ else
+ goto out;
+ if (attrs[WGPEER_A_PRESHARED_KEY] &&
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
+ preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);
+
+ if (attrs[WGPEER_A_FLAGS])
+ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGPEER_F_ALL)
+ goto out;
+
+ ret = -EPFNOSUPPORT;
+ if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
+ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
+ goto out;
+ }
+
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
+ ret = 0;
+ if (!peer) { /* Peer doesn't exist yet. Add a new one. */
+ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
+ goto out;
+
+		/* The peer is new, so there are no allowed IPs to remove. */
+ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity &&
+ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
+ wg->static_identity.static_public,
+ NOISE_PUBLIC_KEY_LEN)) {
+ /* We silently ignore peers that have the same public
+ * key as the device. The reason we do it silently is
+ * that we'd like for people to be able to reuse the
+ * same set of API calls across peers.
+ */
+ up_read(&wg->static_identity.lock);
+ ret = 0;
+ goto out;
+ }
+ up_read(&wg->static_identity.lock);
+
+ peer = wg_peer_create(wg, public_key, preshared_key);
+ if (IS_ERR(peer)) {
+ /* Similar to the above, if the key is invalid, we skip
+ * it without fanfare, so that services don't need to
+ * worry about doing key validation themselves.
+ */
+ ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
+ peer = NULL;
+ goto out;
+ }
+		/* Take an additional reference, as though we've just been
+ * looked up.
+ */
+ wg_peer_get(peer);
+ }
+
+ if (flags & WGPEER_F_REMOVE_ME) {
+ wg_peer_remove(peer);
+ goto out;
+ }
+
+ if (preshared_key) {
+ down_write(&peer->handshake.lock);
+ memcpy(&peer->handshake.preshared_key, preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ up_write(&peer->handshake.lock);
+ }
+
+ if (attrs[WGPEER_A_ENDPOINT]) {
+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+
+ if ((len == sizeof(struct sockaddr_in) &&
+ addr->sa_family == AF_INET) ||
+ (len == sizeof(struct sockaddr_in6) &&
+ addr->sa_family == AF_INET6)) {
+ struct endpoint endpoint = { { { 0 } } };
+
+ memcpy(&endpoint.addr, addr, len);
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ }
+ }
+
+ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
+ &wg->device_update_lock);
+
+ if (attrs[WGPEER_A_ALLOWEDIPS]) {
+ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
+ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
+ attr, allowedip_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_allowedip(peer, allowedip);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
+ const u16 persistent_keepalive_interval = nla_get_u16(
+ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
+ const bool send_keepalive =
+ !peer->persistent_keepalive_interval &&
+ persistent_keepalive_interval &&
+ netif_running(wg->dev);
+
+ peer->persistent_keepalive_interval = persistent_keepalive_interval;
+ if (send_keepalive)
+ wg_packet_send_keepalive(peer);
+ }
+
+ if (netif_running(wg->dev))
+ wg_packet_send_staged_packets(peer);
+
+out:
+ wg_peer_put(peer);
+ if (attrs[WGPEER_A_PRESHARED_KEY])
+ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
+ return ret;
+}
+
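+/* Top-level WG_CMD_SET_DEVICE handler. All attributes are applied while
+ * holding both rtnl_lock and the device update lock, and the private key
+ * attribute is explicitly zeroed from the netlink buffer before returning.
+ */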
+static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+{
+ struct wg_device *wg = lookup_interface(info->attrs, skb);
+ u32 flags = 0;
+ int ret;
+
+ if (IS_ERR(wg)) {
+ ret = PTR_ERR(wg);
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+
+ if (info->attrs[WGDEVICE_A_FLAGS])
+ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGDEVICE_F_ALL)
+ goto out;
+
+ ret = -EPERM;
+ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
+ info->attrs[WGDEVICE_A_FWMARK]) &&
+ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+
+ ++wg->device_update_gen;
+
+ if (info->attrs[WGDEVICE_A_FWMARK]) {
+ struct wg_peer *peer;
+
+ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ }
+
+ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
+ ret = set_port(wg,
+ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
+ if (ret)
+ goto out;
+ }
+
+ if (flags & WGDEVICE_F_REPLACE_PEERS)
+ wg_peer_remove_all(wg);
+
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
+ NOISE_PUBLIC_KEY_LEN) {
+ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
+ u8 public_key[NOISE_PUBLIC_KEY_LEN];
+ struct wg_peer *peer, *temp;
+
+ if (!crypto_memneq(wg->static_identity.static_private,
+ private_key, NOISE_PUBLIC_KEY_LEN))
+ goto skip_set_private_key;
+
+		/* We remove before setting, to prevent a race, which means doing
+ * two 25519-genpub ops.
+ */
+ if (curve25519_generate_public(public_key, private_key)) {
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ public_key);
+ if (peer) {
+ wg_peer_put(peer);
+ wg_peer_remove(peer);
+ }
+ }
+
+ down_write(&wg->static_identity.lock);
+ wg_noise_set_static_identity_private_key(&wg->static_identity,
+ private_key);
+ list_for_each_entry_safe(peer, temp, &wg->peer_list,
+ peer_list) {
+ if (wg_noise_precompute_static_static(peer))
+ wg_noise_expire_current_peer_keypairs(peer);
+ else
+ wg_peer_remove(peer);
+ }
+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ up_write(&wg->static_identity.lock);
+ }
+skip_set_private_key:
+
+ if (info->attrs[WGDEVICE_A_PEERS]) {
+ struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
+ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
+ peer_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_peer(wg, peer);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = 0;
+
+out:
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+ dev_put(wg->dev);
+out_nodev:
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
+ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
+ return ret;
+}
+
+static const struct genl_ops genl_ops[] = {
+ {
+ .cmd = WG_CMD_GET_DEVICE,
+ .start = wg_get_device_start,
+ .dumpit = wg_get_device_dump,
+ .done = wg_get_device_done,
+ .flags = GENL_UNS_ADMIN_PERM
+ }, {
+ .cmd = WG_CMD_SET_DEVICE,
+ .doit = wg_set_device,
+ .flags = GENL_UNS_ADMIN_PERM
+ }
+};
+
+static struct genl_family genl_family __ro_after_init = {
+ .ops = genl_ops,
+ .n_ops = ARRAY_SIZE(genl_ops),
+ .name = WG_GENL_NAME,
+ .version = WG_GENL_VERSION,
+ .maxattr = WGDEVICE_A_MAX,
+ .module = THIS_MODULE,
+ .policy = device_policy,
+ .netnsok = true
+};
+
+int __init wg_genetlink_init(void)
+{
+ return genl_register_family(&genl_family);
+}
+
+void __exit wg_genetlink_uninit(void)
+{
+ genl_unregister_family(&genl_family);
+}
diff --git a/drivers/net/wireguard/netlink.h b/drivers/net/wireguard/netlink.h
new file mode 100644
index 000000000000..15100d92e2e3
--- /dev/null
+++ b/drivers/net/wireguard/netlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_NETLINK_H
+#define _WG_NETLINK_H
+
+int wg_genetlink_init(void);
+void wg_genetlink_uninit(void);
+
+#endif /* _WG_NETLINK_H */
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
new file mode 100644
index 000000000000..d71c8db68a8c
--- /dev/null
+++ b/drivers/net/wireguard/noise.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "noise.h"
+#include "device.h"
+#include "peer.h"
+#include "messages.h"
+#include "queueing.h"
+#include "peerlookup.h"
+
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <crypto/algapi.h>
+
+/* This implements Noise_IKpsk2:
+ *
+ * <- s
+ * ******
+ * -> e, es, s, ss, {t}
+ * <- e, ee, se, psk, {}
+ */
+
+static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
+static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
+static atomic64_t keypair_counter = ATOMIC64_INIT(0);
+
+void __init wg_noise_init(void)
+{
+ struct blake2s_state blake;
+
+ blake2s(handshake_init_chaining_key, handshake_name, NULL,
+ NOISE_HASH_LEN, sizeof(handshake_name), 0);
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
+ blake2s_update(&blake, identifier_name, sizeof(identifier_name));
+ blake2s_final(&blake, handshake_init_hash);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+bool wg_noise_precompute_static_static(struct wg_peer *peer)
+{
+ bool ret = true;
+
+ down_write(&peer->handshake.lock);
+ if (peer->handshake.static_identity->has_identity)
+ ret = curve25519(
+ peer->handshake.precomputed_static_static,
+ peer->handshake.static_identity->static_private,
+ peer->handshake.remote_static);
+ else
+ memset(peer->handshake.precomputed_static_static, 0,
+ NOISE_PUBLIC_KEY_LEN);
+ up_write(&peer->handshake.lock);
+ return ret;
+}
+
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer)
+{
+ memset(handshake, 0, sizeof(*handshake));
+ init_rwsem(&handshake->lock);
+ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
+ handshake->entry.peer = peer;
+ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN);
+ if (peer_preshared_key)
+ memcpy(handshake->preshared_key, peer_preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ handshake->static_identity = static_identity;
+ handshake->state = HANDSHAKE_ZEROED;
+ return wg_noise_precompute_static_static(peer);
+}
+
+static void handshake_zero(struct noise_handshake *handshake)
+{
+ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->hash, 0, NOISE_HASH_LEN);
+ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN);
+ handshake->remote_index = 0;
+ handshake->state = HANDSHAKE_ZEROED;
+}
+
+void wg_noise_handshake_clear(struct noise_handshake *handshake)
+{
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+ down_write(&handshake->lock);
+ handshake_zero(handshake);
+ up_write(&handshake->lock);
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+}
+
+static struct noise_keypair *keypair_create(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL);
+
+ if (unlikely(!keypair))
+ return NULL;
+ keypair->internal_id = atomic64_inc_return(&keypair_counter);
+ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
+ keypair->entry.peer = peer;
+ kref_init(&keypair->refcount);
+ return keypair;
+}
+
+static void keypair_free_rcu(struct rcu_head *rcu)
+{
+ kzfree(container_of(rcu, struct noise_keypair, rcu));
+}
+
+static void keypair_free_kref(struct kref *kref)
+{
+ struct noise_keypair *keypair =
+ container_of(kref, struct noise_keypair, refcount);
+
+ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n",
+ keypair->entry.peer->device->dev->name,
+ keypair->internal_id,
+ keypair->entry.peer->internal_id);
+ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ call_rcu(&keypair->rcu, keypair_free_rcu);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
+{
+ if (unlikely(!keypair))
+ return;
+ if (unlikely(unreference_now))
+ wg_index_hashtable_remove(
+ keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ kref_put(&keypair->refcount, keypair_free_kref);
+}
+
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking noise keypair reference without holding the RCU BH read lock");
+ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
+ return NULL;
+ return keypair;
+}
+
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *old;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+
+ /* We zero the next_keypair before zeroing the others, so that
+ * wg_noise_received_with_keypair returns early before subsequent ones
+ * are zeroed.
+ */
+ old = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->current_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+
+ spin_lock_bh(&peer->keypairs.keypair_update_lock);
+ keypair = rcu_dereference_protected(peer->keypairs.next_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ keypair = rcu_dereference_protected(peer->keypairs.current_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ spin_unlock_bh(&peer->keypairs.keypair_update_lock);
+}
+
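+/* Rotates the previous/current/next keypair slots. As initiator we have just
+ * received the confirming response, so the new keypair becomes current
+ * immediately; as responder it is parked in next_keypair until the first
+ * data packet confirms it.
+ */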
+static void add_new_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *new_keypair)
+{
+ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ next_keypair = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ current_keypair = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ if (new_keypair->i_am_the_initiator) {
+ /* If we're the initiator, it means we've sent a handshake, and
+ * received a confirmation response, which means this new
+ * keypair can now be used.
+ */
+ if (next_keypair) {
+ /* If there already was a next keypair pending, we
+ * demote it to be the previous keypair, and free the
+			 * existing current. Note that this means key compromise
+			 * impersonation (KCI) can result in this transition. It
+			 * would perhaps be more sound to
+ * always just get rid of the unused next keypair
+ * instead of putting it in the previous slot, but this
+ * might be a bit less robust. Something to think about
+ * for the future.
+ */
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ rcu_assign_pointer(keypairs->previous_keypair,
+ next_keypair);
+ wg_noise_keypair_put(current_keypair, true);
+ } else /* If there wasn't an existing next keypair, we replace
+ * the previous with the current one.
+ */
+ rcu_assign_pointer(keypairs->previous_keypair,
+ current_keypair);
+ /* At this point we can get rid of the old previous keypair, and
+ * set up the new keypair.
+ */
+ wg_noise_keypair_put(previous_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, new_keypair);
+ } else {
+ /* If we're the responder, it means we can't use the new keypair
+ * until we receive confirmation via the first data packet, so
+ * we get rid of the existing previous one, the possibly
+ * existing next one, and slide in the new next one.
+ */
+ rcu_assign_pointer(keypairs->next_keypair, new_keypair);
+ wg_noise_keypair_put(next_keypair, true);
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(previous_keypair, true);
+ }
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair)
+{
+ struct noise_keypair *old_keypair;
+ bool key_is_new;
+
+ /* We first check without taking the spinlock. */
+ key_is_new = received_keypair ==
+ rcu_access_pointer(keypairs->next_keypair);
+ if (likely(!key_is_new))
+ return false;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ /* After locking, we double check that things didn't change from
+ * beneath us.
+ */
+ if (unlikely(received_keypair !=
+ rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)))) {
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return false;
+ }
+
+ /* When we've finally received the confirmation, we slide the next
+ * into the current, the current into the previous, and get rid of
+ * the old previous.
+ */
+ old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ rcu_assign_pointer(keypairs->previous_keypair,
+ rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)));
+ wg_noise_keypair_put(old_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, received_keypair);
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return true;
+}
+
+/* Must hold static_identity->lock */
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(static_identity->static_private, private_key,
+ NOISE_PUBLIC_KEY_LEN);
+ curve25519_clamp_secret(static_identity->static_private);
+ static_identity->has_identity = curve25519_generate_public(
+ static_identity->static_public, private_key);
+}
+
+/* This is Hugo Krawczyk's HKDF:
+ * - https://eprint.iacr.org/2010/264.pdf
+ * - https://tools.ietf.org/html/rfc5869
+ */
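+/* In RFC 5869 terms, the first blake2s256_hmac() call below is the extract
+ * step, producing the pseudorandom key "secret" from data keyed with
+ * chaining_key; each subsequent HMAC is one iteration of the expand step,
+ * yielding up to three outputs of at most BLAKE2S_HASH_SIZE bytes each.
+ */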
+static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
+ size_t first_len, size_t second_len, size_t third_len,
+ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN])
+{
+ u8 output[BLAKE2S_HASH_SIZE + 1];
+ u8 secret[BLAKE2S_HASH_SIZE];
+
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (first_len > BLAKE2S_HASH_SIZE ||
+ second_len > BLAKE2S_HASH_SIZE ||
+ third_len > BLAKE2S_HASH_SIZE ||
+ ((second_len || second_dst || third_len || third_dst) &&
+ (!first_len || !first_dst)) ||
+ ((third_len || third_dst) && (!second_len || !second_dst))));
+
+ /* Extract entropy from data into secret */
+ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+
+ if (!first_dst || !first_len)
+ goto out;
+
+ /* Expand first key: key = secret, data = 0x1 */
+ output[0] = 1;
+ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ memcpy(first_dst, output, first_len);
+
+ if (!second_dst || !second_len)
+ goto out;
+
+ /* Expand second key: key = secret, data = first-key || 0x2 */
+ output[BLAKE2S_HASH_SIZE] = 2;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(second_dst, output, second_len);
+
+ if (!third_dst || !third_len)
+ goto out;
+
+ /* Expand third key: key = secret, data = second-key || 0x3 */
+ output[BLAKE2S_HASH_SIZE] = 3;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(third_dst, output, third_len);
+
+out:
+ /* Clear sensitive data from stack */
+ memzero_explicit(secret, BLAKE2S_HASH_SIZE);
+ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
+}
+
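+/* Resets the replay-protection counter and backtrack window and stamps the
+ * key's birthdate, so that expiry decisions are made relative to the moment
+ * of key creation.
+ */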
+static void symmetric_key_init(struct noise_symmetric_key *key)
+{
+ spin_lock_init(&key->counter.receive.lock);
+ atomic64_set(&key->counter.counter, 0);
+ memset(key->counter.receive.backtrack, 0,
+ sizeof(key->counter.receive.backtrack));
+ key->birthdate = ktime_get_coarse_boottime_ns();
+ key->is_valid = true;
+}
+
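+/* Derives the two transport-data keys from the final chaining key. The
+ * ordering of the destinations encodes the role: the initiator's sending
+ * key is the responder's receiving key, and vice versa.
+ */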
+static void derive_keys(struct noise_symmetric_key *first_dst,
+ struct noise_symmetric_key *second_dst,
+ const u8 chaining_key[NOISE_HASH_LEN])
+{
+ kdf(first_dst->key, second_dst->key, NULL, NULL,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
+ chaining_key);
+ symmetric_key_init(first_dst);
+ symmetric_key_init(second_dst);
+}
+
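+/* Performs a Curve25519 DH between the given private and public keys and
+ * mixes the shared secret into the chaining key (and, when key is non-NULL,
+ * into a fresh symmetric key) via the KDF. Fails if the DH result is the
+ * all-zero value.
+ */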
+static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 private[NOISE_PUBLIC_KEY_LEN],
+ const u8 public[NOISE_PUBLIC_KEY_LEN])
+{
+ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN];
+
+ if (unlikely(!curve25519(dh_calculation, private, public)))
+ return false;
+ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key);
+ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN);
+ return true;
+}
+
+static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, hash, NOISE_HASH_LEN);
+ blake2s_update(&blake, src, src_len);
+ blake2s_final(&blake, hash);
+}
+
+static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 psk[NOISE_SYMMETRIC_KEY_LEN])
+{
+ u8 temp_hash[NOISE_HASH_LEN];
+
+ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key);
+ mix_hash(hash, temp_hash, NOISE_HASH_LEN);
+ memzero_explicit(temp_hash, NOISE_HASH_LEN);
+}
+
+static void handshake_init(u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN],
+ const u8 remote_static[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN);
+ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN);
+}
+
+static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash,
+ NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key);
+ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len));
+}
+
+static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len,
+ hash, NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key))
+ return false;
+ mix_hash(hash, src_ciphertext, src_len);
+ return true;
+}
+
+static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN],
+ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN],
+ u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (ephemeral_dst != ephemeral_src)
+ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0,
+ NOISE_PUBLIC_KEY_LEN, chaining_key);
+}
+
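+/* Writes the current time in TAI64N format: a big-endian 64-bit seconds
+ * label followed by a big-endian 32-bit nanoseconds count, with the
+ * nanoseconds deliberately coarsened below.
+ */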
+static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
+{
+ struct timespec64 now;
+
+ ktime_get_real_ts64(&now);
+
+	/* In order to prevent an infoleak from precise timers, we round the
+	 * nanoseconds part down to a multiple of the largest power of two not
+	 * exceeding the minimum interval between initiations that the
+	 * implementation allows anyway.
+ */
+ now.tv_nsec = ALIGN_DOWN(now.tv_nsec,
+ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND));
+
+ /* https://cr.yp.to/libtai/tai64.html */
+ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec);
+ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
+}
+
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake)
+{
+ u8 timestamp[NOISE_TIMESTAMP_LEN];
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (unlikely(!handshake->static_identity->has_identity))
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION);
+
+ handshake_init(handshake->chaining_key, handshake->hash,
+ handshake->remote_static);
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* es */
+ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* s */
+ message_encrypt(dst->encrypted_static,
+ handshake->static_identity->static_public,
+ NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
+
+ /* ss */
+ kdf(handshake->chaining_key, key, NULL,
+ handshake->precomputed_static_static, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ handshake->chaining_key);
+
+ /* {t} */
+ tai64n_now(timestamp);
+ message_encrypt(dst->encrypted_timestamp, timestamp,
+ NOISE_TIMESTAMP_LEN, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_INITIATION;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ bool replay_attack, flood_attack;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 s[NOISE_PUBLIC_KEY_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 t[NOISE_TIMESTAMP_LEN];
+ u64 initiation_consumption;
+
+ down_read(&wg->static_identity.lock);
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake_init(chaining_key, hash, wg->static_identity.static_public);
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* es */
+ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e))
+ goto out;
+
+ /* s */
+ if (!message_decrypt(s, src->encrypted_static,
+ sizeof(src->encrypted_static), key, hash))
+ goto out;
+
+	/* Look up which peer we're actually talking to */
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s);
+ if (!peer)
+ goto out;
+ handshake = &peer->handshake;
+
+ /* ss */
+ kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
+ NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ chaining_key);
+
+ /* {t} */
+ if (!message_decrypt(t, src->encrypted_timestamp,
+ sizeof(src->encrypted_timestamp), key, hash))
+ goto out;
+
+ down_read(&handshake->lock);
+ replay_attack = memcmp(t, handshake->latest_timestamp,
+ NOISE_TIMESTAMP_LEN) <= 0;
+ flood_attack = (s64)handshake->last_initiation_consumption +
+ NSEC_PER_SEC / INITIATIONS_PER_SECOND >
+ (s64)ktime_get_coarse_boottime_ns();
+ up_read(&handshake->lock);
+ if (replay_attack || flood_attack)
+ goto out;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0)
+ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ if ((s64)(handshake->last_initiation_consumption -
+ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+ handshake->last_initiation_consumption = initiation_consumption;
+ handshake->state = HANDSHAKE_CONSUMED_INITIATION;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ up_read(&wg->static_identity.lock);
+ if (!ret_peer)
+ wg_peer_put(peer);
+ return ret_peer;
+}
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake)
+{
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION)
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE);
+ dst->receiver_index = handshake->remote_index;
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* ee */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_ephemeral))
+ goto out;
+
+ /* se */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* psk */
+ mix_psk(handshake->chaining_key, handshake->hash, key,
+ handshake->preshared_key);
+
+ /* {} */
+ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_RESPONSE;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg)
+{
+ enum noise_handshake_state state = HANDSHAKE_ZEROED;
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+
+ down_read(&wg->static_identity.lock);
+
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
+ src->receiver_index, &peer);
+ if (unlikely(!handshake))
+ goto out;
+
+ down_read(&handshake->lock);
+ state = handshake->state;
+ memcpy(hash, handshake->hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
+ memcpy(ephemeral_private, handshake->ephemeral_private,
+ NOISE_PUBLIC_KEY_LEN);
+ up_read(&handshake->lock);
+
+ if (state != HANDSHAKE_CREATED_INITIATION)
+ goto fail;
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* ee */
+ if (!mix_dh(chaining_key, NULL, ephemeral_private, e))
+ goto fail;
+
+ /* se */
+ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e))
+ goto fail;
+
+ /* psk */
+ mix_psk(chaining_key, hash, key, handshake->preshared_key);
+
+ /* {} */
+ if (!message_decrypt(NULL, src->encrypted_nothing,
+ sizeof(src->encrypted_nothing), key, hash))
+ goto fail;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ /* It's important to check that the state is still the same, while we
+ * have an exclusive lock.
+ */
+ if (handshake->state != state) {
+ up_write(&handshake->lock);
+ goto fail;
+ }
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ handshake->state = HANDSHAKE_CONSUMED_RESPONSE;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+ goto out;
+
+fail:
+ wg_peer_put(peer);
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ up_read(&wg->static_identity.lock);
+ return ret_peer;
+}
+
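+/* Converts a completed handshake into a transport keypair: the handshake
+ * state is zeroed and its index hashtable entry is atomically replaced by
+ * the new keypair's entry, unless the peer has died in the meantime.
+ */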
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *new_keypair;
+ bool ret = false;
+
+ down_write(&handshake->lock);
+ if (handshake->state != HANDSHAKE_CREATED_RESPONSE &&
+ handshake->state != HANDSHAKE_CONSUMED_RESPONSE)
+ goto out;
+
+ new_keypair = keypair_create(handshake->entry.peer);
+ if (!new_keypair)
+ goto out;
+ new_keypair->i_am_the_initiator = handshake->state ==
+ HANDSHAKE_CONSUMED_RESPONSE;
+ new_keypair->remote_index = handshake->remote_index;
+
+ if (new_keypair->i_am_the_initiator)
+ derive_keys(&new_keypair->sending, &new_keypair->receiving,
+ handshake->chaining_key);
+ else
+ derive_keys(&new_keypair->receiving, &new_keypair->sending,
+ handshake->chaining_key);
+
+ handshake_zero(handshake);
+ rcu_read_lock_bh();
+ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer,
+ handshake)->is_dead))) {
+ add_new_keypair(keypairs, new_keypair);
+ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n",
+ handshake->entry.peer->device->dev->name,
+ new_keypair->internal_id,
+ handshake->entry.peer->internal_id);
+ ret = wg_index_hashtable_replace(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry, &new_keypair->entry);
+ } else {
+ kzfree(new_keypair);
+ }
+ rcu_read_unlock_bh();
+
+out:
+ up_write(&handshake->lock);
+ return ret;
+}
diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
new file mode 100644
index 000000000000..138a07bb817c
--- /dev/null
+++ b/drivers/net/wireguard/noise.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+#ifndef _WG_NOISE_H
+#define _WG_NOISE_H
+
+#include "messages.h"
+#include "peerlookup.h"
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+
+union noise_counter {
+ struct {
+ u64 counter;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
+ spinlock_t lock;
+ } receive;
+ atomic64_t counter;
+};
+
+struct noise_symmetric_key {
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ union noise_counter counter;
+ u64 birthdate;
+ bool is_valid;
+};
+
+struct noise_keypair {
+ struct index_hashtable_entry entry;
+ struct noise_symmetric_key sending;
+ struct noise_symmetric_key receiving;
+ __le32 remote_index;
+ bool i_am_the_initiator;
+ struct kref refcount;
+ struct rcu_head rcu;
+ u64 internal_id;
+};
+
+struct noise_keypairs {
+ struct noise_keypair __rcu *current_keypair;
+ struct noise_keypair __rcu *previous_keypair;
+ struct noise_keypair __rcu *next_keypair;
+ spinlock_t keypair_update_lock;
+};
+
+struct noise_static_identity {
+ u8 static_public[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ struct rw_semaphore lock;
+ bool has_identity;
+};
+
+enum noise_handshake_state {
+ HANDSHAKE_ZEROED,
+ HANDSHAKE_CREATED_INITIATION,
+ HANDSHAKE_CONSUMED_INITIATION,
+ HANDSHAKE_CREATED_RESPONSE,
+ HANDSHAKE_CONSUMED_RESPONSE
+};
+
+struct noise_handshake {
+ struct index_hashtable_entry entry;
+
+ enum noise_handshake_state state;
+ u64 last_initiation_consumption;
+
+ struct noise_static_identity *static_identity;
+
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_static[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN];
+
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
+
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+
+ u8 latest_timestamp[NOISE_TIMESTAMP_LEN];
+ __le32 remote_index;
+
+	/* Protects all members except those that are immutable after
+	 * noise_handshake_init(): remote_static, precomputed_static_static,
+	 * static_identity.
+ */
+ struct rw_semaphore lock;
+};
+
+struct wg_device;
+
+void wg_noise_init(void);
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer);
+void wg_noise_handshake_clear(struct noise_handshake *handshake);
+static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
+{
+ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() -
+ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now);
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair);
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs);
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair);
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
+
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
+bool wg_noise_precompute_static_static(struct wg_peer *peer);
+
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs);
+
+#endif /* _WG_NOISE_H */
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
new file mode 100644
index 000000000000..071eedf33f5a
--- /dev/null
+++ b/drivers/net/wireguard/peer.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peer.h"
+#include "device.h"
+#include "queueing.h"
+#include "timers.h"
+#include "peerlookup.h"
+#include "noise.h"
+
+#include <linux/kref.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+
+static atomic64_t peer_counter = ATOMIC64_INIT(0);
+
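+/* Allocates and fully wires up a new peer: handshake state, endpoint cache,
+ * tx/rx queues, timers, NAPI context, and the pubkey hashtable entry. The
+ * device update lock must be held, as asserted below.
+ */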
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ struct wg_peer *peer;
+ int ret = -ENOMEM;
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
+ return ERR_PTR(ret);
+
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (unlikely(!peer))
+ return ERR_PTR(ret);
+ peer->device = wg;
+
+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ public_key, preshared_key, peer)) {
+ ret = -EKEYREJECTED;
+ goto err_1;
+ }
+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ goto err_1;
+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+ MAX_QUEUED_PACKETS))
+ goto err_2;
+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+ MAX_QUEUED_PACKETS))
+ goto err_3;
+
+ peer->internal_id = atomic64_inc_return(&peer_counter);
+ peer->serial_work_cpu = nr_cpumask_bits;
+ wg_cookie_init(&peer->latest_cookie);
+ wg_timers_init(peer);
+ wg_cookie_checker_precompute_peer_keys(peer);
+ spin_lock_init(&peer->keypairs.keypair_update_lock);
+ INIT_WORK(&peer->transmit_handshake_work,
+ wg_packet_handshake_send_worker);
+ rwlock_init(&peer->endpoint_lock);
+ kref_init(&peer->refcount);
+ skb_queue_head_init(&peer->staged_packet_queue);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&peer->napi);
+ list_add_tail(&peer->peer_list, &wg->peer_list);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
+ ++wg->num_peers;
+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
+ return peer;
+
+err_3:
+ wg_packet_queue_free(&peer->tx_queue, false);
+err_2:
+ dst_cache_destroy(&peer->endpoint_cache);
+err_1:
+ kfree(peer);
+ return ERR_PTR(ret);
+}
+
+struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking peer reference without holding the RCU read lock");
+ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
+ return NULL;
+ return peer;
+}
+
+static void peer_make_dead(struct wg_peer *peer)
+{
+ /* Remove from configuration-time lookup structures. */
+ list_del_init(&peer->peer_list);
+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
+ &peer->device->device_update_lock);
+ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);
+
+	/* Mark as dead, so that we don't allow jumping contexts afterward. */
+ WRITE_ONCE(peer->is_dead, true);
+
+ /* The caller must now synchronize_rcu() for this to take effect. */
+}
+
+static void peer_remove_after_dead(struct wg_peer *peer)
+{
+ WARN_ON(!peer->is_dead);
+
+ /* No more keypairs can be created for this peer, since is_dead protects
+ * add_new_keypair, so we can now destroy existing ones.
+ */
+ wg_noise_keypairs_clear(&peer->keypairs);
+
+ /* Destroy all ongoing timers that were in-flight at the beginning of
+ * this function.
+ */
+ wg_timers_stop(peer);
+
+ /* The transition between packet encryption/decryption queues isn't
+ * guarded by is_dead, but each reference's life is strictly bounded by
+ * two generations: once for parallel crypto and once for serial
+ * ingestion, so we can simply flush twice, and be sure that we no
+ * longer have references inside these queues.
+ */
+
+ /* a) For encrypt/decrypt. */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.1) For send (but not receive, since that's napi). */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.2.1) For receive (but not send, since that's wq). */
+ napi_disable(&peer->napi);
+	/* b.2.2) It's now safe to remove the napi struct, which must be done
+ * here from process context.
+ */
+ netif_napi_del(&peer->napi);
+
+ /* Ensure any workstructs we own (like transmit_handshake_work or
+ * clear_peer_work) no longer are in use.
+ */
+ flush_workqueue(peer->device->handshake_send_wq);
+
+ /* After the above flushes, a peer might still be active in a few
+ * different contexts: 1) from xmit(), before hitting is_dead and
+ * returning, 2) from wg_packet_consume_data(), before hitting is_dead
+ * and returning, 3) from wg_receive_handshake_packet() after a point
+ * where it has processed an incoming handshake packet, but where
+ * all calls to pass it off to timers fails because of is_dead. We won't
+ * have new references in (1) eventually, because we're removed from
+ * allowedips; we won't have new references in (2) eventually, because
+ * wg_index_hashtable_lookup will always return NULL, since we removed
+ * all existing keypairs and no more can be created; we won't have new
+ * references in (3) eventually, because we're removed from the pubkey
+	 * hash table, which allows for at most one further handshake response
+	 * via the still-uncleared index hashtable entry,
+ * and in wg_cookie_message_consume, the lookup eventually gets a peer
+ * with a refcount of zero, so no new reference is taken.
+ */
+
+ --peer->device->num_peers;
+ wg_peer_put(peer);
+}
+
+/* We have a separate "remove" function to make sure that all active places
+ * a peer is currently operating will eventually come to an end and not pass
+ * their reference onto another context.
+ */
+void wg_peer_remove(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ lockdep_assert_held(&peer->device->device_update_lock);
+
+ peer_make_dead(peer);
+ synchronize_rcu();
+ peer_remove_after_dead(peer);
+}
+
+void wg_peer_remove_all(struct wg_device *wg)
+{
+ struct wg_peer *peer, *temp;
+ LIST_HEAD(dead_peers);
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ /* Avoid having to traverse individually for each one. */
+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
+
+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
+ peer_make_dead(peer);
+ list_add_tail(&peer->peer_list, &dead_peers);
+ }
+ synchronize_rcu();
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
+ peer_remove_after_dead(peer);
+}
+
+static void rcu_release(struct rcu_head *rcu)
+{
+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+
+ dst_cache_destroy(&peer->endpoint_cache);
+ wg_packet_queue_free(&peer->rx_queue, false);
+ wg_packet_queue_free(&peer->tx_queue, false);
+
+ /* The final zeroing takes care of clearing any remaining handshake key
+ * material and other potentially sensitive information.
+ */
+ kzfree(peer);
+}
+
+static void kref_release(struct kref *refcount)
+{
+ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);
+
+ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+	/* Remove ourselves from dynamic runtime lookup structures, now that the
+ * last reference is gone.
+ */
+ wg_index_hashtable_remove(peer->device->index_hashtable,
+ &peer->handshake.entry);
+
+ /* Remove any lingering packets that didn't have a chance to be
+ * transmitted.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* Free the memory used. */
+ call_rcu(&peer->rcu, rcu_release);
+}
+
+void wg_peer_put(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ kref_put(&peer->refcount, kref_release);
+}
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
new file mode 100644
index 000000000000..23af40922997
--- /dev/null
+++ b/drivers/net/wireguard/peer.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEER_H
+#define _WG_PEER_H
+
+#include "device.h"
+#include "noise.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <net/dst_cache.h>
+
+struct wg_device;
+
+struct endpoint {
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ };
+ union {
+ struct {
+ struct in_addr src4;
+			/* Essentially the same as addr6.sin6_scope_id */
+ int src_if4;
+ };
+ struct in6_addr src6;
+ };
+};
+
+struct wg_peer {
+ struct wg_device *device;
+ struct crypt_queue tx_queue, rx_queue;
+ struct sk_buff_head staged_packet_queue;
+ int serial_work_cpu;
+ struct noise_keypairs keypairs;
+ struct endpoint endpoint;
+ struct dst_cache endpoint_cache;
+ rwlock_t endpoint_lock;
+ struct noise_handshake handshake;
+ atomic64_t last_sent_handshake;
+ struct work_struct transmit_handshake_work, clear_peer_work;
+ struct cookie latest_cookie;
+ struct hlist_node pubkey_hash;
+ u64 rx_bytes, tx_bytes;
+ struct timer_list timer_retransmit_handshake, timer_send_keepalive;
+ struct timer_list timer_new_handshake, timer_zero_key_material;
+ struct timer_list timer_persistent_keepalive;
+ unsigned int timer_handshake_attempts;
+ u16 persistent_keepalive_interval;
+ bool timer_need_another_keepalive;
+ bool sent_lastminute_handshake;
+ struct timespec64 walltime_last_handshake;
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct list_head peer_list;
+ struct list_head allowedips_list;
+ u64 internal_id;
+ struct napi_struct napi;
+ bool is_dead;
+};
+
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
+
+struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer);
+static inline struct wg_peer *wg_peer_get(struct wg_peer *peer)
+{
+ kref_get(&peer->refcount);
+ return peer;
+}
+void wg_peer_put(struct wg_peer *peer);
+void wg_peer_remove(struct wg_peer *peer);
+void wg_peer_remove_all(struct wg_device *wg);
+
+#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
new file mode 100644
index 000000000000..e4deb331476b
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peerlookup.h"
+#include "peer.h"
+#include "noise.h"
+
+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+	/* siphash gives us a secure 64-bit number based on a random key. Since
+ * the bits are uniformly distributed, we can then mask off to get the
+ * bits we need.
+ */
+ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
+
+ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
+}
+
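+/* The siphash key is randomized at allocation time, so remote parties cannot
+ * predict or force bucket placement.
+ */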
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
+{
+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ get_random_bytes(&table->key, sizeof(table->key));
+ hash_init(table->hashtable);
+ mutex_init(&table->lock);
+ return table;
+}
+
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_add_head_rcu(&peer->pubkey_hash,
+ pubkey_bucket(table, peer->handshake.remote_static));
+ mutex_unlock(&table->lock);
+}
+
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_del_init_rcu(&peer->pubkey_hash);
+ mutex_unlock(&table->lock);
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+ struct wg_peer *iter_peer, *peer = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
+ if (!memcmp(pubkey, iter_peer->handshake.remote_static,
+ NOISE_PUBLIC_KEY_LEN)) {
+ peer = iter_peer;
+ break;
+ }
+ }
+ peer = wg_peer_get_maybe_zero(peer);
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static struct hlist_head *index_bucket(struct index_hashtable *table,
+ const __le32 index)
+{
+ /* Since the indices are random and thus all bits are uniformly
+ * distributed, we can find its bucket simply by masking.
+ */
+ return &table->hashtable[(__force u32)index &
+ (HASH_SIZE(table->hashtable) - 1)];
+}
+
+struct index_hashtable *wg_index_hashtable_alloc(void)
+{
+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ hash_init(table->hashtable);
+ spin_lock_init(&table->lock);
+ return table;
+}
+
+/* At the moment, we limit ourselves to 2^20 total peers, which generally might
+ * amount to 2^20*3 items in this hashtable. The algorithm below works by
+ * picking a random number and testing it. We can see that these limits mean we
+ * usually succeed pretty quickly:
+ *
+ * >>> def calculation(tries, size):
+ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
+ * ...
+ * >>> calculation(1, 2**20 * 3)
+ * 0.999267578125
+ * >>> calculation(2, 2**20 * 3)
+ * 0.0007318854331970215
+ * >>> calculation(3, 2**20 * 3)
+ * 5.360489012673497e-07
+ * >>> calculation(4, 2**20 * 3)
+ * 3.9261394135792216e-10
+ *
+ * At the moment, we don't do any masking, so this algorithm isn't exactly
+ * constant time in either the random guessing or in the hash list lookup. We
+ * could require a minimum of 3 tries, which would successfully mask the
+ * guessing. this would not, however, help with the growing hash lengths, which
+ * is another thing to consider moving forward.
+ */
+
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ struct index_hashtable_entry *existing_entry;
+
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_lock_bh();
+
+search_unused_slot:
+ /* First we try to find an unused slot, randomly, while unlocked. */
+ entry->index = (__force __le32)get_random_u32();
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index)
+ /* If it's already in use, we continue searching. */
+ goto search_unused_slot;
+ }
+
+ /* Once we've found an unused slot, we lock it, and then double-check
+ * that nobody else stole it from us.
+ */
+ spin_lock_bh(&table->lock);
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index) {
+ spin_unlock_bh(&table->lock);
+ /* If it was stolen, we start over. */
+ goto search_unused_slot;
+ }
+ }
+ /* Otherwise, we know we have it exclusively (since we're locked),
+ * so we insert.
+ */
+ hlist_add_head_rcu(&entry->index_hash,
+ index_bucket(table, entry->index));
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_unlock_bh();
+
+ return entry->index;
+}
+
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
+{
+ if (unlikely(hlist_unhashed(&old->index_hash)))
+ return false;
+ spin_lock_bh(&table->lock);
+ new->index = old->index;
+ hlist_replace_rcu(&old->index_hash, &new->index_hash);
+
+ /* Calling init here NULLs out index_hash, and in fact after this
+ * function returns, it's theoretically possible for this to get
+ * reinserted elsewhere. That means the RCU lookup below might either
+ * terminate early or jump between buckets, in which case the packet
+ * simply gets dropped, which isn't terrible.
+ */
+ INIT_HLIST_NODE(&old->index_hash);
+ spin_unlock_bh(&table->lock);
+ return true;
+}
+
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+}
+
+/* Returns a strong reference to an entry->peer */
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer)
+{
+ struct index_hashtable_entry *iter_entry, *entry = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
+ index_hash) {
+ if (iter_entry->index == index) {
+ if (likely(iter_entry->type & type_mask))
+ entry = iter_entry;
+ break;
+ }
+ }
+ if (likely(entry)) {
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
+ if (likely(entry->peer))
+ *peer = entry->peer;
+ else
+ entry = NULL;
+ }
+ rcu_read_unlock_bh();
+ return entry;
+}
diff --git a/drivers/net/wireguard/peerlookup.h b/drivers/net/wireguard/peerlookup.h
new file mode 100644
index 000000000000..ced811797680
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEERLOOKUP_H
+#define _WG_PEERLOOKUP_H
+
+#include "messages.h"
+
+#include <linux/hashtable.h>
+#include <linux/mutex.h>
+#include <linux/siphash.h>
+
+struct wg_peer;
+
+struct pubkey_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 11);
+ siphash_key_t key;
+ struct mutex lock;
+};
+
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void);
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);
+
+struct index_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 13);
+ spinlock_t lock;
+};
+
+enum index_hashtable_type {
+ INDEX_HASHTABLE_HANDSHAKE = 1U << 0,
+ INDEX_HASHTABLE_KEYPAIR = 1U << 1
+};
+
+struct index_hashtable_entry {
+ struct wg_peer *peer;
+ struct hlist_node index_hash;
+ enum index_hashtable_type type;
+ __le32 index;
+};
+
+struct index_hashtable *wg_index_hashtable_alloc(void);
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new);
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer);
+
+#endif /* _WG_PEERLOOKUP_H */
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
new file mode 100644
index 000000000000..5c964fcb994e
--- /dev/null
+++ b/drivers/net/wireguard/queueing.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+{
+ int cpu;
+ struct multicore_worker __percpu *worker =
+ alloc_percpu(struct multicore_worker);
+
+ if (!worker)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu_ptr(worker, cpu)->ptr = ptr;
+ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
+ }
+ return worker;
+}
+
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len)
+{
+ int ret;
+
+ memset(queue, 0, sizeof(*queue));
+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ if (ret)
+ return ret;
+ if (function) {
+ if (multicore) {
+ queue->worker = wg_packet_percpu_multicore_worker_alloc(
+ function, queue);
+ if (!queue->worker) {
+ ptr_ring_cleanup(&queue->ring, NULL);
+ return -ENOMEM;
+ }
+ } else {
+ INIT_WORK(&queue->work, function);
+ }
+ }
+ return 0;
+}
+
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+{
+ if (multicore)
+ free_percpu(queue->worker);
+ WARN_ON(!__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, NULL);
+}
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
new file mode 100644
index 000000000000..fecb559cbdb6
--- /dev/null
+++ b/drivers/net/wireguard/queueing.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_QUEUEING_H
+#define _WG_QUEUEING_H
+
+#include "peer.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_device;
+struct wg_peer;
+struct multicore_worker;
+struct crypt_queue;
+struct sk_buff;
+
+/* queueing.c APIs: */
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
+
+/* receive.c APIs: */
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
+void wg_packet_handshake_receive_worker(struct work_struct *work);
+/* NAPI poll function: */
+int wg_packet_rx_poll(struct napi_struct *napi, int budget);
+/* Workqueue worker: */
+void wg_packet_decrypt_worker(struct work_struct *work);
+
+/* send.c APIs: */
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry);
+void wg_packet_send_handshake_response(struct wg_peer *peer);
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index);
+void wg_packet_send_keepalive(struct wg_peer *peer);
+void wg_packet_purge_staged_packets(struct wg_peer *peer);
+void wg_packet_send_staged_packets(struct wg_peer *peer);
+/* Workqueue workers: */
+void wg_packet_handshake_send_worker(struct work_struct *work);
+void wg_packet_tx_worker(struct work_struct *work);
+void wg_packet_encrypt_worker(struct work_struct *work);
+
+enum packet_state {
+ PACKET_STATE_UNCRYPTED,
+ PACKET_STATE_CRYPTED,
+ PACKET_STATE_DEAD
+};
+
+struct packet_cb {
+ u64 nonce;
+ struct noise_keypair *keypair;
+ atomic_t state;
+ u32 mtu;
+ u8 ds;
+};
+
+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+{
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct iphdr)) <=
+ skb_tail_pointer(skb) &&
+ ip_hdr(skb)->version == 4)
+ return htons(ETH_P_IP);
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
+ skb_tail_pointer(skb) &&
+ ipv6_hdr(skb)->version == 6)
+ return htons(ETH_P_IPV6);
+ return 0;
+}
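
The rule encoded above, check that the whole fixed header fits before reading the version nibble, can be shown with a small editorial sketch on a plain buffer (userspace, not part of the driver; the 20- and 40-byte sizes are the real iphdr/ipv6hdr sizes):

	#include <stdint.h>
	#include <stdio.h>
	#include <stddef.h>

	/* Never trust the version nibble of an untrusted buffer until the
	 * full fixed header is known to be present.
	 */
	static int probe_ip_version(const uint8_t *buf, size_t len)
	{
		if (len >= 20 && (buf[0] >> 4) == 4)   /* sizeof(struct iphdr) */
			return 4;
		if (len >= 40 && (buf[0] >> 4) == 6)   /* sizeof(struct ipv6hdr) */
			return 6;
		return 0;                              /* invalid or too short */
	}

	int main(void)
	{
		uint8_t v4[20] = { 0x45 };             /* version 4, IHL 5 */
		uint8_t v6[40] = { 0x60 };             /* version 6 */
		uint8_t runt[4] = { 0x45 };            /* too short to trust */

		printf("%d %d %d\n",
		       probe_ip_version(v4, sizeof(v4)),
		       probe_ip_version(v6, sizeof(v6)),
		       probe_ip_version(runt, sizeof(runt)));   /* 4 6 0 */
		return 0;
	}
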
+
+static inline void wg_reset_packet(struct sk_buff *skb)
+{
+ skb_scrub_packet(skb, true);
+ memset(&skb->headers_start, 0,
+ offsetof(struct sk_buff, headers_end) -
+ offsetof(struct sk_buff, headers_start));
+ skb->queue_mapping = 0;
+ skb->nohdr = 0;
+ skb->peeked = 0;
+ skb->mac_len = 0;
+ skb->dev = NULL;
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+ skb_reset_tc(skb);
+#endif
+ skb->hdr_len = skb_headroom(skb);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb_probe_transport_header(skb);
+ skb_reset_inner_headers(skb);
+}
+
+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+{
+ unsigned int cpu = *stored_cpu, cpu_index, i;
+
+ if (unlikely(cpu == nr_cpumask_bits ||
+ !cpumask_test_cpu(cpu, cpu_online_mask))) {
+ cpu_index = id % cpumask_weight(cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < cpu_index; ++i)
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ *stored_cpu = cpu;
+ }
+ return cpu;
+}
+
+/* This function is racy, in the sense that next is unlocked, so it could return
+ * the same CPU twice. A race-free version of this would be to instead store an
+ * atomic sequence number, do an increment-and-return, and then iterate through
+ * every possible CPU until we get to that index, as wg_cpumask_choose_online()
+ * does above. However, that's a bit slower, and it doesn't seem like this
+ * potential race actually introduces any performance loss, so we live with it.
+ */
+static inline int wg_cpumask_next_online(int *next)
+{
+ int cpu = *next;
+
+ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
+ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ return cpu;
+}
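
The race-free alternative that the comment above describes can be sketched in userspace (editorial only; the online set is faked as a bitmask, and weight must equal its population count, whereas the kernel would use cpu_online_mask and cpumask_next()):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned int sequence;

	/* Walk to the n-th set bit, i.e. the n-th online CPU. */
	static int nth_online_cpu(unsigned long online_mask, unsigned int n)
	{
		int cpu = -1;
		unsigned int i;

		for (i = 0; i <= n; ++i) {
			do
				++cpu;
			while (!(online_mask & (1ul << cpu)));
		}
		return cpu;
	}

	/* Atomic increment-and-return hands each caller a unique sequence
	 * number, so no two concurrent callers can land on the same index.
	 */
	static int next_online_cpu_racefree(unsigned long online_mask, int weight)
	{
		unsigned int seq = atomic_fetch_add(&sequence, 1);

		return nth_online_cpu(online_mask, seq % weight);
	}

	int main(void)
	{
		unsigned long online = 0xd;   /* CPUs 0, 2, 3 online */
		int i;

		for (i = 0; i < 6; ++i)
			printf("%d ", next_online_cpu_racefree(online, 3));
		printf("\n");                 /* 0 2 3 0 2 3 */
		return 0;
	}
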
+
+static inline int wg_queue_enqueue_per_device_and_peer(
+ struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+{
+ int cpu;
+
+ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+ /* We first queue this up for the peer ingestion; the consumer
+ * will wait for the state to change to CRYPTED or DEAD before
+ * dequeueing it.
+ */
+ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ return -ENOSPC;
+ /* Then we queue it up in the device queue, which consumes the
+ * packet as soon as it can.
+ */
+ cpu = wg_cpumask_next_online(next_cpu);
+ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
+ return -EPIPE;
+ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
+ return 0;
+}
+
+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+ peer->internal_id),
+ peer->device->packet_crypt_wq, &queue->work);
+ wg_peer_put(peer);
+}
+
+static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ napi_schedule(&peer->napi);
+ wg_peer_put(peer);
+}
+
+#ifdef DEBUG
+bool wg_packet_counter_selftest(void);
+#endif
+
+#endif /* _WG_QUEUEING_H */
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
new file mode 100644
index 000000000000..3fedd1d21f5e
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "ratelimiter.h"
+#include <linux/siphash.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+
+static struct kmem_cache *entry_cache;
+static hsiphash_key_t key;
+static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
+static DEFINE_MUTEX(init_lock);
+static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
+static atomic_t total_entries = ATOMIC_INIT(0);
+static unsigned int max_entries, table_size;
+static void wg_ratelimiter_gc_entries(struct work_struct *);
+static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
+static struct hlist_head *table_v4;
+#if IS_ENABLED(CONFIG_IPV6)
+static struct hlist_head *table_v6;
+#endif
+
+struct ratelimiter_entry {
+ u64 last_time_ns, tokens, ip;
+ void *net;
+ spinlock_t lock;
+ struct hlist_node hash;
+ struct rcu_head rcu;
+};
+
+enum {
+ PACKETS_PER_SECOND = 20,
+ PACKETS_BURSTABLE = 5,
+ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
+ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
+};
+
+static void entry_free(struct rcu_head *rcu)
+{
+ kmem_cache_free(entry_cache,
+ container_of(rcu, struct ratelimiter_entry, rcu));
+ atomic_dec(&total_entries);
+}
+
+static void entry_uninit(struct ratelimiter_entry *entry)
+{
+ hlist_del_rcu(&entry->hash);
+ call_rcu(&entry->rcu, entry_free);
+}
+
+/* Calling this function with a NULL work uninits all entries. */
+static void wg_ratelimiter_gc_entries(struct work_struct *work)
+{
+ const u64 now = ktime_get_coarse_boottime_ns();
+ struct ratelimiter_entry *entry;
+ struct hlist_node *temp;
+ unsigned int i;
+
+ for (i = 0; i < table_size; ++i) {
+ spin_lock(&table_lock);
+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#endif
+ spin_unlock(&table_lock);
+ if (likely(work))
+ cond_resched();
+ }
+ if (likely(work))
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+}
+
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
+{
+ /* We only take the bottom half of the net pointer, so that we can hash
+ * 3 words in the end. This way, siphash's len param fits into the final
+ * u32, and we don't incur an extra round.
+ */
+ const u32 net_word = (unsigned long)net;
+ struct ratelimiter_entry *entry;
+ struct hlist_head *bucket;
+ u64 ip;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip = (u64 __force)ip_hdr(skb)->saddr;
+ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
+ (table_size - 1)];
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* Only use 64 bits, so as to ratelimit the whole /64. */
+ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
+ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
+ (table_size - 1)];
+ }
+#endif
+ else
+ return false;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, bucket, hash) {
+ if (entry->net == net && entry->ip == ip) {
+ u64 now, tokens;
+ bool ret;
+ /* Quasi-inspired by nft_limit.c, but this is actually a
+ * slightly different algorithm. Namely, we incorporate
+ * the burst as part of the maximum tokens, rather than
+ * as part of the rate.
+ */
+ spin_lock(&entry->lock);
+ now = ktime_get_coarse_boottime_ns();
+ tokens = min_t(u64, TOKEN_MAX,
+ entry->tokens + now -
+ entry->last_time_ns);
+ entry->last_time_ns = now;
+ ret = tokens >= PACKET_COST;
+ entry->tokens = ret ? tokens - PACKET_COST : tokens;
+ spin_unlock(&entry->lock);
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ if (atomic_inc_return(&total_entries) > max_entries)
+ goto err_oom;
+
+ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
+ if (unlikely(!entry))
+ goto err_oom;
+
+ entry->net = net;
+ entry->ip = ip;
+ INIT_HLIST_NODE(&entry->hash);
+ spin_lock_init(&entry->lock);
+ entry->last_time_ns = ktime_get_coarse_boottime_ns();
+ entry->tokens = TOKEN_MAX - PACKET_COST;
+ spin_lock(&table_lock);
+ hlist_add_head_rcu(&entry->hash, bucket);
+ spin_unlock(&table_lock);
+ return true;
+
+err_oom:
+ atomic_dec(&total_entries);
+ return false;
+}
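
The token bucket above measures tokens in nanoseconds, which makes the arithmetic worth seeing in isolation. Here is a minimal userspace sketch (editorial, not the driver's code; struct bucket and allow are invented names): elapsed time tops the bucket up, capped at TOKEN_MAX, and each allowed packet drains PACKET_COST, so folding the burst into the cap rather than into the rate is what the nft_limit.c comment alludes to:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC      1000000000ull
	#define PACKETS_PER_SEC   20
	#define BURST             5
	#define PACKET_COST       (NSEC_PER_SEC / PACKETS_PER_SEC)
	#define TOKEN_MAX         (PACKET_COST * BURST)

	struct bucket { uint64_t tokens, last_ns; };

	static bool allow(struct bucket *b, uint64_t now_ns)
	{
		uint64_t tokens = b->tokens + (now_ns - b->last_ns);

		if (tokens > TOKEN_MAX)
			tokens = TOKEN_MAX;
		b->last_ns = now_ns;
		if (tokens < PACKET_COST) {
			b->tokens = tokens;
			return false;
		}
		b->tokens = tokens - PACKET_COST;
		return true;
	}

	int main(void)
	{
		struct bucket b = { TOKEN_MAX, 0 };
		int i, allowed = 0;

		/* An instantaneous burst: exactly BURST packets get through. */
		for (i = 0; i < 10; ++i)
			allowed += allow(&b, 0);
		printf("burst allowed: %d\n", allowed);              /* 5 */

		/* 50ms later, exactly one more token has accrued. */
		printf("after 50ms: %d\n", allow(&b, PACKET_COST));  /* 1 */
		return 0;
	}
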
+
+int wg_ratelimiter_init(void)
+{
+ mutex_lock(&init_lock);
+ if (++init_refcnt != 1)
+ goto out;
+
+ entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
+ if (!entry_cache)
+ goto err;
+
+ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
+ * but it has in common the use of a massive hashtable, so we borrow its
+ * wisdom about good table sizes on systems with different amounts of
+ * RAM. The calculation here comes from there.
+ */
+ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
+ max_t(unsigned long, 16, roundup_pow_of_two(
+ (totalram_pages() << PAGE_SHIFT) /
+ (1U << 14) / sizeof(struct hlist_head)));
+ max_entries = table_size * 8;
+
+ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ if (unlikely(!table_v4))
+ goto err_kmemcache;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ if (unlikely(!table_v6)) {
+ kvfree(table_v4);
+ goto err_kmemcache;
+ }
+#endif
+
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+ get_random_bytes(&key, sizeof(key));
+out:
+ mutex_unlock(&init_lock);
+ return 0;
+
+err_kmemcache:
+ kmem_cache_destroy(entry_cache);
+err:
+ --init_refcnt;
+ mutex_unlock(&init_lock);
+ return -ENOMEM;
+}
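
The sizing formula above works out to roughly one hlist_head per 16 KiB of RAM, clamped to [16, 8192] buckets and rounded up to a power of two so that "& (table_size - 1)" works as the hash mask. A small editorial sketch of the arithmetic (assuming 8-byte pointers; table_size_for_ram is an invented name):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t roundup_pow_of_two(uint64_t x)
	{
		uint64_t r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	static uint64_t table_size_for_ram(uint64_t ram_bytes)
	{
		uint64_t n;

		if (ram_bytes > (1ull << 30))   /* more than 1 GiB of RAM */
			return 8192;
		n = ram_bytes / (1u << 14) / sizeof(void *);
		return n < 16 ? 16 : roundup_pow_of_two(n);
	}

	int main(void)
	{
		printf("128 MiB -> %llu buckets\n",
		       (unsigned long long)table_size_for_ram(128ull << 20)); /* 1024 */
		printf("512 MiB -> %llu buckets\n",
		       (unsigned long long)table_size_for_ram(512ull << 20)); /* 4096 */
		printf("  4 GiB -> %llu buckets\n",
		       (unsigned long long)table_size_for_ram(4ull << 30));   /* 8192 */
		return 0;
	}
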
+
+void wg_ratelimiter_uninit(void)
+{
+ mutex_lock(&init_lock);
+ if (!init_refcnt || --init_refcnt)
+ goto out;
+
+ cancel_delayed_work_sync(&gc_work);
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ kvfree(table_v4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kvfree(table_v6);
+#endif
+ kmem_cache_destroy(entry_cache);
+out:
+ mutex_unlock(&init_lock);
+}
+
+#include "selftest/ratelimiter.c"
diff --git a/drivers/net/wireguard/ratelimiter.h b/drivers/net/wireguard/ratelimiter.h
new file mode 100644
index 000000000000..83067f71ea99
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_RATELIMITER_H
+#define _WG_RATELIMITER_H
+
+#include <linux/skbuff.h>
+
+int wg_ratelimiter_init(void);
+void wg_ratelimiter_uninit(void);
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net);
+
+#ifdef DEBUG
+bool wg_ratelimiter_selftest(void);
+#endif
+
+#endif /* _WG_RATELIMITER_H */
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
new file mode 100644
index 000000000000..9c6bab9c981f
--- /dev/null
+++ b/drivers/net/wireguard/receive.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "device.h"
+#include "peer.h"
+#include "timers.h"
+#include "messages.h"
+#include "cookie.h"
+#include "socket.h"
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <net/ip_tunnels.h>
+
+/* Must be called with bh disabled. */
+static void update_rx_stats(struct wg_peer *peer, size_t len)
+{
+ struct pcpu_sw_netstats *tstats =
+ get_cpu_ptr(peer->device->dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ ++tstats->rx_packets;
+ tstats->rx_bytes += len;
+ peer->rx_bytes += len;
+ u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
+}
+
+#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
+
+static size_t validate_header_len(struct sk_buff *skb)
+{
+ if (unlikely(skb->len < sizeof(struct message_header)))
+ return 0;
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
+ skb->len >= MESSAGE_MINIMUM_LENGTH)
+ return sizeof(struct message_data);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
+ skb->len == sizeof(struct message_handshake_initiation))
+ return sizeof(struct message_handshake_initiation);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
+ skb->len == sizeof(struct message_handshake_response))
+ return sizeof(struct message_handshake_response);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
+ skb->len == sizeof(struct message_handshake_cookie))
+ return sizeof(struct message_handshake_cookie);
+ return 0;
+}
+
+static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
+{
+ size_t data_offset, data_len, header_len;
+ struct udphdr *udp;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+ skb_transport_header(skb) < skb->head ||
+ (skb_transport_header(skb) + sizeof(struct udphdr)) >
+ skb_tail_pointer(skb)))
+ return -EINVAL; /* Bogus IP header */
+ udp = udp_hdr(skb);
+ data_offset = (u8 *)udp - skb->data;
+ if (unlikely(data_offset > U16_MAX ||
+ data_offset + sizeof(struct udphdr) > skb->len))
+ /* Packet has offset at impossible location or isn't big enough
+ * to have UDP fields.
+ */
+ return -EINVAL;
+ data_len = ntohs(udp->len);
+ if (unlikely(data_len < sizeof(struct udphdr) ||
+ data_len > skb->len - data_offset))
+ /* UDP packet is reporting too small of a size or lying about
+ * its size.
+ */
+ return -EINVAL;
+ data_len -= sizeof(struct udphdr);
+ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
+ if (unlikely(!pskb_may_pull(skb,
+ data_offset + sizeof(struct message_header)) ||
+ pskb_trim(skb, data_len + data_offset) < 0))
+ return -EINVAL;
+ skb_pull(skb, data_offset);
+ if (unlikely(skb->len != data_len))
+ /* Final len does not agree with calculated len */
+ return -EINVAL;
+ header_len = validate_header_len(skb);
+ if (unlikely(!header_len))
+ return -EINVAL;
+ __skb_push(skb, data_offset);
+ if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
+ return -EINVAL;
+ __skb_pull(skb, data_offset);
+ return 0;
+}
+
+static void wg_receive_handshake_packet(struct wg_device *wg,
+ struct sk_buff *skb)
+{
+ enum cookie_mac_state mac_state;
+ struct wg_peer *peer = NULL;
+ /* This is global, so that our load calculation applies to the whole
+ * system. We don't care about races with it at all.
+ */
+ static u64 last_under_load;
+ bool packet_needs_cookie;
+ bool under_load;
+
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
+ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
+ wg->dev->name, skb);
+ wg_cookie_message_consume(
+ (struct message_handshake_cookie *)skb->data, wg);
+ return;
+ }
+
+ under_load = skb_queue_len(&wg->incoming_handshakes) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ if (under_load)
+ last_under_load = ktime_get_coarse_boottime_ns();
+ else if (last_under_load)
+ under_load = !wg_birthdate_has_expired(last_under_load, 1);
+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
+ under_load);
+ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
+ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
+ packet_needs_cookie = false;
+ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
+ packet_needs_cookie = true;
+ } else {
+ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
+ struct message_handshake_initiation *message =
+ (struct message_handshake_initiation *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_initiation(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ wg_packet_send_handshake_response(peer);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
+ struct message_handshake_response *message =
+ (struct message_handshake_response *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_response(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_handshake_complete(peer);
+ /* Calling this function will either send any existing
+ * packets in the queue and not send a keepalive, which
+ * is the best case; or, if there's nothing in the
+ * queue, it will send a keepalive, in order to give
+ * immediate confirmation of the session.
+ */
+ wg_packet_send_keepalive(peer);
+ }
+ break;
+ }
+ }
+
+ if (unlikely(!peer)) {
+ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
+ return;
+ }
+
+ local_bh_disable();
+ update_rx_stats(peer, skb->len);
+ local_bh_enable();
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_peer_put(peer);
+}
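
The under-load window in the function above has a one-second hysteresis: once the handshake queue crosses an eighth of its capacity, cookie replies keep being demanded until a full second has passed with the queue back below the threshold. An editorial userspace sketch of just that logic (now_ns stands in for ktime_get_coarse_boottime_ns(); names are invented):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ull

	static bool under_load_hysteresis(unsigned int queue_len,
					  unsigned int threshold,
					  uint64_t now_ns)
	{
		static uint64_t last_under_load;
		bool under_load = queue_len >= threshold;

		if (under_load)
			last_under_load = now_ns;            /* refresh the window */
		else if (last_under_load)
			under_load = now_ns - last_under_load < NSEC_PER_SEC;
		return under_load;
	}

	int main(void)
	{
		/* The queue spikes at t=0 and drains immediately; we remain
		 * "under load" until a second after the spike.
		 */
		printf("%d\n", under_load_hysteresis(512, 128, 0));              /* 1 */
		printf("%d\n", under_load_hysteresis(0, 128, NSEC_PER_SEC / 2)); /* 1 */
		printf("%d\n", under_load_hysteresis(0, 128, 2 * NSEC_PER_SEC)); /* 0 */
		return 0;
	}
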
+
+void wg_packet_handshake_receive_worker(struct work_struct *work)
+{
+ struct wg_device *wg = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ wg_receive_handshake_packet(wg, skb);
+ dev_kfree_skb(skb);
+ cond_resched();
+ }
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ if (peer->sent_lastminute_handshake)
+ return;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send) {
+ peer->sent_lastminute_handshake = true;
+ wg_packet_send_queued_handshake_initiation(peer, false);
+ }
+}
+
+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct sk_buff *trailer;
+ unsigned int offset;
+ int num_frags;
+
+ if (unlikely(!key))
+ return false;
+
+ if (unlikely(!READ_ONCE(key->is_valid) ||
+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(key->is_valid, false);
+ return false;
+ }
+
+ PACKET_CB(skb)->nonce =
+ le64_to_cpu(((struct message_data *)skb->data)->counter);
+
+ /* We ensure that the network header is part of the packet before we
+ * call skb_cow_data, so that there's no chance that data is removed
+ * from the skb; this lets us extract the original endpoint later.
+ */
+ offset = skb->data - skb_network_header(skb);
+ skb_push(skb, offset);
+ num_frags = skb_cow_data(skb, 0, &trailer);
+ offset += sizeof(struct message_data);
+ skb_pull(skb, offset);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
+ return false;
+
+ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ key->key))
+ return false;
+
+ /* Another ugly situation of pushing and pulling the header so as to
+ * keep endpoint information intact.
+ */
+ skb_push(skb, offset);
+ if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
+ return false;
+ skb_pull(skb, offset);
+
+ return true;
+}
+
+/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
+static bool counter_validate(union noise_counter *counter, u64 their_counter)
+{
+ unsigned long index, index_current, top, i;
+ bool ret = false;
+
+ spin_lock_bh(&counter->receive.lock);
+
+ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ their_counter >= REJECT_AFTER_MESSAGES))
+ goto out;
+
+ ++their_counter;
+
+ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
+ counter->receive.counter))
+ goto out;
+
+ index = their_counter >> ilog2(BITS_PER_LONG);
+
+ if (likely(their_counter > counter->receive.counter)) {
+ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ top = min_t(unsigned long, index - index_current,
+ COUNTER_BITS_TOTAL / BITS_PER_LONG);
+ for (i = 1; i <= top; ++i)
+ counter->receive.backtrack[(i + index_current) &
+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
+ counter->receive.counter = their_counter;
+ }
+
+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
+ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
+ &counter->receive.backtrack[index]);
+
+out:
+ spin_unlock_bh(&counter->receive.lock);
+ return ret;
+}
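
Since counter_validate() above is the heart of the RFC 6479 replay protection, here is an editorial userspace rendering of the same sliding window (locking and the REJECT_AFTER_MESSAGES cap are omitted for brevity; struct replay_window and counter_ok are invented names). The window is a ring of words indexed by counter/64, and advancing it only zeroes the words being entered, never shifts the bitmap:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define WINDOW_BITS   2048u               /* COUNTER_BITS_TOTAL */
	#define WORD_BITS     64u
	#define WINDOW_WORDS  (WINDOW_BITS / WORD_BITS)

	struct replay_window {
		uint64_t greatest;                /* highest counter seen + 1 */
		uint64_t ring[WINDOW_WORDS];
	};

	static bool counter_ok(struct replay_window *w, uint64_t counter)
	{
		uint64_t idx, cur, top, i;

		++counter;
		if (counter + (WINDOW_BITS - WORD_BITS) < w->greatest)
			return false;             /* too old: fell out of window */
		idx = counter / WORD_BITS;
		if (counter > w->greatest) {      /* slide the window forward */
			cur = w->greatest / WORD_BITS;
			top = idx - cur;
			if (top > WINDOW_WORDS)
				top = WINDOW_WORDS;
			for (i = 1; i <= top; ++i)
				w->ring[(cur + i) % WINDOW_WORDS] = 0;
			w->greatest = counter;
		}
		idx %= WINDOW_WORDS;
		if (w->ring[idx] & (1ull << (counter % WORD_BITS)))
			return false;             /* replay */
		w->ring[idx] |= 1ull << (counter % WORD_BITS);
		return true;
	}

	int main(void)
	{
		struct replay_window w = { 0 };

		printf("%d %d %d %d\n",
		       counter_ok(&w, 0),     /* 1: fresh */
		       counter_ok(&w, 5),     /* 1: ahead, fine */
		       counter_ok(&w, 5),     /* 0: replayed */
		       counter_ok(&w, 3));    /* 1: old but inside window */
		return 0;
	}
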
+
+#include "selftest/counter.c"
+
+static void wg_packet_consume_data_done(struct wg_peer *peer,
+ struct sk_buff *skb,
+ struct endpoint *endpoint)
+{
+ struct net_device *dev = peer->device->dev;
+ unsigned int len, len_before_trim;
+ struct wg_peer *routed_peer;
+
+ wg_socket_set_peer_endpoint(peer, endpoint);
+
+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
+ PACKET_CB(skb)->keypair))) {
+ wg_timers_handshake_complete(peer);
+ wg_packet_send_staged_packets(peer);
+ }
+
+ keep_key_fresh(peer);
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+
+ /* A packet with length 0 is a keepalive packet */
+ if (unlikely(!skb->len)) {
+ update_rx_stats(peer, message_data_len(0));
+ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ goto packet_processed;
+ }
+
+ wg_timers_data_received(peer);
+
+ if (unlikely(skb_network_header(skb) < skb->head))
+ goto dishonest_packet_size;
+ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
+ (ip_hdr(skb)->version == 4 ||
+ (ip_hdr(skb)->version == 6 &&
+ pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
+ goto dishonest_packet_type;
+
+ skb->dev = dev;
+ /* We've already verified the Poly1305 auth tag, which means this packet
+ * was not modified in transit. We can therefore tell the networking
+ * stack that all checksums of every layer of encapsulation have already
+ * been checked "by the hardware" and that it is therefore unnecessary
+ * to check them again in software.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = ~0; /* All levels */
+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ len = ntohs(ip_hdr(skb)->tot_len);
+ if (unlikely(len < sizeof(struct iphdr)))
+ goto dishonest_packet_size;
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP_ECN_set_ce(ip_hdr(skb));
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ len = ntohs(ipv6_hdr(skb)->payload_len) +
+ sizeof(struct ipv6hdr);
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ } else {
+ goto dishonest_packet_type;
+ }
+
+ if (unlikely(len > skb->len))
+ goto dishonest_packet_size;
+ len_before_trim = skb->len;
+ if (unlikely(pskb_trim(skb, len)))
+ goto packet_processed;
+
+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
+ skb);
+ wg_peer_put(routed_peer); /* We don't need the extra reference. */
+
+ if (unlikely(routed_peer != peer))
+ goto dishonest_packet_peer;
+
+ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
+ ++dev->stats.rx_dropped;
+ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ } else {
+ update_rx_stats(peer, message_data_len(len_before_trim));
+ }
+ return;
+
+dishonest_packet_peer:
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_length_errors;
+ goto packet_processed;
+packet_processed:
+ dev_kfree_skb(skb);
+}
+
+int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
+ struct crypt_queue *queue = &peer->rx_queue;
+ struct noise_keypair *keypair;
+ struct endpoint endpoint;
+ enum packet_state state;
+ struct sk_buff *skb;
+ int work_done = 0;
+ bool free;
+
+ if (unlikely(budget <= 0))
+ return 0;
+
+ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(skb);
+ keypair = PACKET_CB(skb)->keypair;
+ free = true;
+
+ if (unlikely(state != PACKET_STATE_CRYPTED))
+ goto next;
+
+ if (unlikely(!counter_validate(&keypair->receiving.counter,
+ PACKET_CB(skb)->nonce))) {
+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
+ peer->device->dev->name,
+ PACKET_CB(skb)->nonce,
+ keypair->receiving.counter.receive.counter);
+ goto next;
+ }
+
+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
+ goto next;
+
+ wg_reset_packet(skb);
+ wg_packet_consume_data_done(peer, skb, &endpoint);
+ free = false;
+
+next:
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ if (unlikely(free))
+ dev_kfree_skb(skb);
+
+ if (++work_done >= budget)
+ break;
+ }
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+void wg_packet_decrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = likely(decrypt_packet(skb,
+ &PACKET_CB(skb)->keypair->receiving)) ?
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
+ wg_queue_enqueue_per_peer_napi(skb, state);
+ }
+}
+
+static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+{
+ __le32 idx = ((struct message_data *)skb->data)->key_idx;
+ struct wg_peer *peer = NULL;
+ int ret;
+
+ rcu_read_lock_bh();
+ PACKET_CB(skb)->keypair =
+ (struct noise_keypair *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
+ &peer);
+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
+ goto err_keypair;
+
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+ &peer->rx_queue, skb,
+ wg->packet_crypt_wq,
+ &wg->decrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+ if (likely(!ret || ret == -EPIPE)) {
+ rcu_read_unlock_bh();
+ return;
+ }
+err:
+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
+err_keypair:
+ rcu_read_unlock_bh();
+ wg_peer_put(peer);
+ dev_kfree_skb(skb);
+}
+
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
+{
+ if (unlikely(prepare_skb_header(skb, wg) < 0))
+ goto err;
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
+ int cpu;
+
+ if (skb_queue_len(&wg->incoming_handshakes) >
+ MAX_QUEUED_INCOMING_HANDSHAKES ||
+ unlikely(!rng_is_initialized())) {
+ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ skb_queue_tail(&wg->incoming_handshakes, skb);
+ /* Queues up a call to wg_packet_handshake_receive_worker(): */
+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ queue_work_on(cpu, wg->handshake_receive_wq,
+ &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_DATA):
+ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+ wg_packet_consume_data(wg, skb);
+ break;
+ default:
+ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ return;
+
+err:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
new file mode 100644
index 000000000000..846db14cb046
--- /dev/null
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This contains some basic static unit tests for the allowedips data structure.
+ * It also has two additional modes that are disabled and meant to be used by
+ * folks directly playing with this file. If you define the macro
+ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
+ * memory, it will be printed out as KERN_DEBUG in a format that can be passed
+ * to graphviz (the dot command) to visualize it. If you define the macro
+ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
+ * randomized tests done against a trivial implementation, which may take
+ * upwards of a half-hour to complete. There's no set of users who should be
+ * enabling these, and the only developers who should go anywhere near these
+ * knobs are the ones who are reading this comment.
+ */
+
+#ifdef DEBUG
+
+#include <linux/siphash.h>
+
+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
+ u8 cidr)
+{
+ swap_endian(dst, src, bits);
+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
+ if (cidr)
+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
+}
+
+static __init void print_node(struct allowedips_node *node, u8 bits)
+{
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
+ char *fmt_declaration = KERN_DEBUG
+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *style = "dotted";
+ u8 ip1[16], ip2[16];
+ u32 color = 0;
+
+ if (bits == 32) {
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ } else if (bits == 128) {
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ }
+ if (node->peer) {
+ hsiphash_key_t key = { { 0 } };
+
+ memcpy(&key, &node->peer, sizeof(node->peer));
+ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
+ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
+ hsiphash_1u32(0xabad1dea, &key) % 200;
+ style = "bold";
+ }
+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
+ printk(fmt_declaration, ip1, node->cidr, style, color);
+ if (node->bit[0]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[0])->bits, bits,
+ node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[0])->cidr);
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ }
+ if (node->bit[1]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[1])->bits,
+ bits, node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[1])->cidr);
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
+ }
+}
+
+static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
+{
+ printk(KERN_DEBUG "digraph trie {\n");
+ print_node(rcu_dereference_raw(top), bits);
+ printk(KERN_DEBUG "}\n");
+}
+
+enum {
+ NUM_PEERS = 2000,
+ NUM_RAND_ROUTES = 400,
+ NUM_MUTATED_ROUTES = 100,
+ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
+};
+
+struct horrible_allowedips {
+ struct hlist_head head;
+};
+
+struct horrible_allowedips_node {
+ struct hlist_node table;
+ union nf_inet_addr ip;
+ union nf_inet_addr mask;
+ u8 ip_version;
+ void *value;
+};
+
+static __init void horrible_allowedips_init(struct horrible_allowedips *table)
+{
+ INIT_HLIST_HEAD(&table->head);
+}
+
+static __init void horrible_allowedips_free(struct horrible_allowedips *table)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ hlist_del(&node->table);
+ kfree(node);
+ }
+}
+
+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
+{
+ union nf_inet_addr mask;
+
+ memset(&mask, 0x00, 128 / 8);
+ memset(&mask, 0xff, cidr / 8);
+ if (cidr % 32)
+ mask.all[cidr / 32] = (__force u32)htonl(
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
+ return mask;
+}
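
A quick editorial check of the mask construction above for one value (userspace, not part of the selftest): a /20 should yield 0xfffff000 in the first 32-bit word, with the memset filling the whole bytes and the shifted htonl() word supplying the partial one.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint8_t cidr = 20;
		uint32_t mask[4] = { 0 };

		memset(mask, 0xff, cidr / 8);       /* two full 0xff bytes */
		if (cidr % 32)
			mask[cidr / 32] = htonl(0xffffffffUL << (32 - cidr % 32));
		printf("/%u -> %08x\n", cidr, (unsigned)ntohl(mask[0]));
		return 0;                           /* prints: /20 -> fffff000 */
	}
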
+
+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
+{
+ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
+ hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
+}
+
+static __init inline void
+horrible_mask_self(struct horrible_allowedips_node *node)
+{
+ if (node->ip_version == 4) {
+ node->ip.ip &= node->mask.ip;
+ } else if (node->ip_version == 6) {
+ node->ip.ip6[0] &= node->mask.ip6[0];
+ node->ip.ip6[1] &= node->mask.ip6[1];
+ node->ip.ip6[2] &= node->mask.ip6[2];
+ node->ip.ip6[3] &= node->mask.ip6[3];
+ }
+}
+
+static __init inline bool
+horrible_match_v4(const struct horrible_allowedips_node *node,
+ struct in_addr *ip)
+{
+ return (ip->s_addr & node->mask.ip) == node->ip.ip;
+}
+
+static __init inline bool
+horrible_match_v6(const struct horrible_allowedips_node *node,
+ struct in6_addr *ip)
+{
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
+ node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
+ node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
+ node->ip.ip6[2] &&
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
+}
+
+static __init void
+horrible_insert_ordered(struct horrible_allowedips *table,
+ struct horrible_allowedips_node *node)
+{
+ struct horrible_allowedips_node *other = NULL, *where = NULL;
+ u8 my_cidr = horrible_mask_to_cidr(node->mask);
+
+ hlist_for_each_entry(other, &table->head, table) {
+ if (!memcmp(&other->mask, &node->mask,
+ sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip,
+ sizeof(union nf_inet_addr)) &&
+ other->ip_version == node->ip_version) {
+ other->value = node->value;
+ kfree(node);
+ return;
+ }
+ where = other;
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr)
+ break;
+ }
+ if (!other && !where)
+ hlist_add_head(&node->table, &table->head);
+ else if (!other)
+ hlist_add_behind(&node->table, &where->table);
+ else
+ hlist_add_before(&node->table, &where->table);
+}
+
+static __init int
+horrible_allowedips_insert_v4(struct horrible_allowedips *table,
+ struct in_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 4;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init int
+horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in6 = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 6;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init void *
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
+ struct in_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 4)
+ continue;
+ if (horrible_match_v4(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init void *
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 6)
+ continue;
+ if (horrible_match_v6(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init bool randomized_test(void)
+{
+ unsigned int i, j, k, mutate_amount, cidr;
+ u8 ip[16], mutate_mask[16], mutated[16];
+ struct wg_peer **peers, *peer;
+ struct horrible_allowedips h;
+ DEFINE_MUTEX(mutex);
+ struct allowedips t;
+ bool ret = false;
+
+ mutex_init(&mutex);
+
+ wg_allowedips_init(&t);
+ horrible_allowedips_init(&h);
+
+ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
+ if (unlikely(!peers)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ for (i = 0; i < NUM_PEERS; ++i) {
+ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
+ if (unlikely(!peers[i])) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ kref_init(&peers[i]->refcount);
+ }
+
+ mutex_lock(&mutex);
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 4);
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 4);
+ prandom_bytes(mutate_mask, 4);
+ mutate_amount = prandom_u32_max(32);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+ for (; k < 4; ++k)
+ mutate_mask[k] = 0;
+ for (k = 0; k < 4; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t,
+ (struct in_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h,
+ (struct in_addr *)mutated, cidr, peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 16);
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 16);
+ prandom_bytes(mutate_mask, 16);
+ mutate_amount = prandom_u32_max(128);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+ for (; k < 16; ++k)
+ mutate_mask[k] = 0;
+ for (k = 0; k < 16; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t,
+ (struct in6_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(
+ &h, (struct in6_addr *)mutated, cidr,
+ peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ mutex_unlock(&mutex);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) !=
+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) !=
+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+ ret = true;
+
+free:
+ mutex_lock(&mutex);
+free_locked:
+ wg_allowedips_free(&t, &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_free(&h);
+ if (peers) {
+ for (i = 0; i < NUM_PEERS; ++i)
+ kfree(peers[i]);
+ }
+ kfree(peers);
+ return ret;
+}
+
+static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
+{
+ static struct in_addr ip;
+ u8 *split = (u8 *)&ip;
+
+ split[0] = a;
+ split[1] = b;
+ split[2] = c;
+ split[3] = d;
+ return &ip;
+}
+
+static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
+{
+ static struct in6_addr ip;
+ __be32 *split = (__be32 *)&ip;
+
+ split[0] = cpu_to_be32(a);
+ split[1] = cpu_to_be32(b);
+ split[2] = cpu_to_be32(c);
+ split[3] = cpu_to_be32(d);
+ return &ip;
+}
+
+static __init struct wg_peer *init_peer(void)
+{
+ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+
+ if (!peer)
+ return NULL;
+ kref_init(&peer->refcount);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ return peer;
+}
+
+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
+#define maybe_fail() do { \
+ ++i; \
+ if (!_s) { \
+ pr_info("allowedips self-test %zu: FAIL\n", i); \
+ success = false; \
+ } \
+ } while (0)
+
+#define test(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_boolean(cond) do { \
+ bool _s = (cond); \
+ maybe_fail(); \
+ } while (0)
+
+bool __init wg_allowedips_selftest(void)
+{
+ bool found_a = false, found_b = false, found_c = false, found_d = false,
+ found_e = false, found_other = false;
+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
+ *d = init_peer(), *e = init_peer(), *f = init_peer(),
+ *g = init_peer(), *h = init_peer();
+ struct allowedips_node *iter_node;
+ bool success = false;
+ struct allowedips t;
+ DEFINE_MUTEX(mutex);
+ struct in6_addr ip;
+ size_t i = 0, count = 0;
+ __be64 part;
+
+ mutex_init(&mutex);
+ mutex_lock(&mutex);
+ wg_allowedips_init(&t);
+
+ if (!a || !b || !c || !d || !e || !f || !g || !h) {
+ pr_err("allowedips self-test malloc: FAIL\n");
+ goto free;
+ }
+
+ insert(4, a, 192, 168, 4, 0, 24);
+ insert(4, b, 192, 168, 4, 4, 32);
+ insert(4, c, 192, 168, 0, 0, 16);
+ insert(4, d, 192, 95, 5, 64, 27);
+ /* replaces previous entry, and maskself is required */
+ insert(4, c, 192, 95, 5, 65, 27);
+ insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
+ insert(4, e, 0, 0, 0, 0, 0);
+ insert(6, e, 0, 0, 0, 0, 0);
+ /* replaces previous entry */
+ insert(6, f, 0, 0, 0, 0, 0);
+ insert(6, g, 0x24046800, 0, 0, 0, 32);
+ /* maskself is required */
+ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
+ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
+ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ insert(4, g, 64, 15, 112, 0, 20);
+ /* maskself is required */
+ insert(4, h, 64, 15, 123, 211, 25);
+ insert(4, a, 10, 0, 0, 0, 25);
+ insert(4, b, 10, 0, 0, 128, 25);
+ insert(4, a, 10, 1, 0, 0, 30);
+ insert(4, b, 10, 1, 0, 4, 30);
+ insert(4, c, 10, 1, 0, 8, 29);
+ insert(4, d, 10, 1, 0, 16, 29);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ success = true;
+
+ test(4, a, 192, 168, 4, 20);
+ test(4, a, 192, 168, 4, 0);
+ test(4, b, 192, 168, 4, 4);
+ test(4, c, 192, 168, 200, 182);
+ test(4, c, 192, 95, 5, 68);
+ test(4, e, 192, 95, 5, 96);
+ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
+ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
+ test(6, f, 0x26075300, 0x60006b01, 0, 0);
+ test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
+ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0, 0);
+ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
+ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
+ test(4, g, 64, 15, 116, 26);
+ test(4, g, 64, 15, 127, 3);
+ test(4, g, 64, 15, 123, 1);
+ test(4, h, 64, 15, 123, 128);
+ test(4, h, 64, 15, 123, 129);
+ test(4, a, 10, 0, 0, 52);
+ test(4, b, 10, 0, 0, 220);
+ test(4, a, 10, 1, 0, 2);
+ test(4, b, 10, 1, 0, 6);
+ test(4, c, 10, 1, 0, 10);
+ test(4, d, 10, 1, 0, 20);
+
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 64, 0, 0, 0, 32);
+ insert(4, a, 128, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 32);
+ insert(4, a, 255, 0, 0, 0, 32);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 1, 0, 0, 0);
+ test_negative(4, a, 64, 0, 0, 0);
+ test_negative(4, a, 128, 0, 0, 0);
+ test_negative(4, a, 192, 0, 0, 0);
+ test_negative(4, a, 255, 0, 0, 0);
+
+ wg_allowedips_free(&t, &mutex);
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 168, 0, 0, 16);
+ insert(4, a, 192, 168, 0, 0, 24);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 192, 168, 0, 1);
+
+ /* These will hit the WARN_ON(len >= 128) in free_node if something
+ * goes wrong.
+ */
+ for (i = 0; i < 128; ++i) {
+ part = cpu_to_be64(~(1LLU << (i % 64)));
+ memset(&ip, 0xff, 16);
+ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ }
+
+ wg_allowedips_free(&t, &mutex);
+
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 95, 5, 93, 27);
+ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(4, a, 10, 1, 0, 20, 29);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
+ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family = wg_allowedips_read_node(iter_node, ip, &cidr);
+
+ count++;
+
+ if (cidr == 27 && family == AF_INET &&
+ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
+ found_a = true;
+ else if (cidr == 128 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
+ sizeof(struct in6_addr)))
+ found_b = true;
+ else if (cidr == 29 && family == AF_INET &&
+ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
+ found_c = true;
+ else if (cidr == 83 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
+ sizeof(struct in6_addr)))
+ found_d = true;
+ else if (cidr == 21 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075000, 0, 0, 0),
+ sizeof(struct in6_addr)))
+ found_e = true;
+ else
+ found_other = true;
+ }
+ test_boolean(count == 5);
+ test_boolean(found_a);
+ test_boolean(found_b);
+ test_boolean(found_c);
+ test_boolean(found_d);
+ test_boolean(found_e);
+ test_boolean(!found_other);
+
+ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
+ success = randomized_test();
+
+ if (success)
+ pr_info("allowedips self-tests: pass\n");
+
+free:
+ wg_allowedips_free(&t, &mutex);
+ kfree(a);
+ kfree(b);
+ kfree(c);
+ kfree(d);
+ kfree(e);
+ kfree(f);
+ kfree(g);
+ kfree(h);
+ mutex_unlock(&mutex);
+
+ return success;
+}
+
+#undef test_negative
+#undef test
+#undef remove
+#undef insert
+#undef init_peer
+
+#endif
diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
new file mode 100644
index 000000000000..f4fbb9072ed7
--- /dev/null
+++ b/drivers/net/wireguard/selftest/counter.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+bool __init wg_packet_counter_selftest(void)
+{
+ unsigned int test_num = 0, i;
+ union noise_counter counter;
+ bool success = true;
+
+#define T_INIT do { \
+ memset(&counter, 0, sizeof(union noise_counter)); \
+ spin_lock_init(&counter.receive.lock); \
+ } while (0)
+#define T_LIM (COUNTER_WINDOW_SIZE + 1)
+#define T(n, v) do { \
+ ++test_num; \
+ if (counter_validate(&counter, n) != (v)) { \
+ pr_err("nonce counter self-test %u: FAIL\n", \
+ test_num); \
+ success = false; \
+ } \
+ } while (0)
+
+ T_INIT;
+ /* 1 */ T(0, true);
+ /* 2 */ T(1, true);
+ /* 3 */ T(1, false);
+ /* 4 */ T(9, true);
+ /* 5 */ T(8, true);
+ /* 6 */ T(7, true);
+ /* 7 */ T(7, false);
+ /* 8 */ T(T_LIM, true);
+ /* 9 */ T(T_LIM - 1, true);
+ /* 10 */ T(T_LIM - 1, false);
+ /* 11 */ T(T_LIM - 2, true);
+ /* 12 */ T(2, true);
+ /* 13 */ T(2, false);
+ /* 14 */ T(T_LIM + 16, true);
+ /* 15 */ T(3, false);
+ /* 16 */ T(T_LIM + 16, false);
+ /* 17 */ T(T_LIM * 4, true);
+ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true);
+ /* 19 */ T(10, false);
+ /* 20 */ T(T_LIM * 4 - T_LIM, false);
+ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false);
+ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true);
+ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false);
+ /* 24 */ T(0, false);
+ /* 25 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true);
+ /* 27 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false);
+ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true);
+ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false);
+ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false);
+ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false);
+ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true);
+ /* 34 */ T(0, false);
+
+ T_INIT;
+ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i)
+ T(i, true);
+ T(0, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i)
+ T(i, true);
+ T(1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;)
+ T(i, true);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;)
+ T(i, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(0, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+
+#undef T
+#undef T_LIM
+#undef T_INIT
+
+ if (success)
+ pr_info("nonce counter self-tests: pass\n");
+ return success;
+}
+#endif
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
new file mode 100644
index 000000000000..bcd6462e4540
--- /dev/null
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+
+#include <linux/jiffies.h>
+
+static const struct {
+ bool result;
+ unsigned int msec_to_sleep_before;
+} expected_results[] __initconst = {
+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
+ [PACKETS_BURSTABLE] = { false, 0 },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 2] = { false, 0 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 4] = { true, 0 },
+ [PACKETS_BURSTABLE + 5] = { false, 0 }
+};
+
+static __init unsigned int maximum_jiffies_at_index(int index)
+{
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ int i;
+
+ for (i = 0; i <= index; ++i)
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
+}
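
To make the timing budget above concrete, here is an editorial sketch of the arithmetic (userspace; sleep_before mirrors the msec_to_sleep_before fields of expected_results): each checkpoint gets a slack of two thirds of one packet interval, plus all the deliberate sleeps scheduled so far.

	#include <stdio.h>

	#define MSEC_PER_SEC        1000
	#define PACKETS_PER_SECOND  20
	#define PACKETS_BURSTABLE   5

	int main(void)
	{
		const unsigned int sleep_before[] = {
			[PACKETS_BURSTABLE + 1] = MSEC_PER_SEC / PACKETS_PER_SECOND,
			[PACKETS_BURSTABLE + 3] = MSEC_PER_SEC / PACKETS_PER_SECOND * 2,
			[PACKETS_BURSTABLE + 5] = 0,
		};
		unsigned int i, budget = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;

		for (i = 0; i < sizeof(sleep_before) / sizeof(*sleep_before); ++i) {
			budget += sleep_before[i];
			printf("trial %u must finish within %u ms\n", i, budget);
		}
		return 0;   /* 33 ms for the burst, 83 ms after trial 6, 183 ms after trial 8 */
	}
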
+
+static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ struct sk_buff *skb6, struct ipv6hdr *hdr6,
+ int *test)
+{
+ unsigned long loop_start_time;
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ loop_start_time = jiffies;
+
+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb4, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb4, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i);
+ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb6, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb6, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+#endif
+ }
+ return 0;
+}
+
+static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ int *test)
+{
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+
+ if (atomic_read(&total_entries))
+ return -EXFULL;
+ ++(*test);
+
+ for (i = 0; i <= max_entries; ++i) {
+ hdr4->saddr = htonl(i);
+ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries))
+ return -EXFULL;
+ ++(*test);
+ }
+ return 0;
+}
+
+bool __init wg_ratelimiter_selftest(void)
+{
+ enum { TRIALS_BEFORE_GIVING_UP = 5000 };
+ bool success = false;
+ int test = 0, trials;
+ struct sk_buff *skb4, *skb6 = NULL;
+ struct iphdr *hdr4;
+ struct ipv6hdr *hdr6 = NULL;
+
+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
+ return true;
+
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+
+ if (wg_ratelimiter_init())
+ goto out;
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+
+ skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL);
+ if (unlikely(!skb4))
+ goto err_nofree;
+ skb4->protocol = htons(ETH_P_IP);
+ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4));
+ hdr4->saddr = htonl(8182);
+ skb_reset_network_header(skb4);
+ ++test;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL);
+ if (unlikely(!skb6)) {
+ kfree_skb(skb4);
+ goto err_nofree;
+ }
+ skb6->protocol = htons(ETH_P_IPV6);
+ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6));
+ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212);
+ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188);
+ skb_reset_network_header(skb6);
+ ++test;
+#endif
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0, ret;
+
+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
+ if (ret == -ETIMEDOUT) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(500);
+ continue;
+ } else if (ret < 0) {
+ test += test_count;
+ goto err;
+ } else {
+ test += test_count;
+ break;
+ }
+ }
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0;
+
+ if (capacity_test(skb4, hdr4, &test_count) < 0) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(50);
+ continue;
+ }
+ test += test_count;
+ break;
+ }
+
+ success = true;
+
+err:
+ kfree_skb(skb4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kfree_skb(skb6);
+#endif
+err_nofree:
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ /* Uninit one extra time to check underflow detection. */
+ wg_ratelimiter_uninit();
+out:
+ if (success)
+ pr_info("ratelimiter self-tests: pass\n");
+ else
+ pr_err("ratelimiter self-test %d: FAIL\n", test);
+
+ return success;
+}
+#endif
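
The expected_results[] table at the top of this file describes classic token-bucket behavior: a burst of PACKETS_BURSTABLE packets passes immediately, and afterwards one token accrues every MSEC_PER_SEC / PACKETS_PER_SECOND milliseconds. A minimal single-bucket sketch of that policy, with illustrative constants (the kernel version instead keys a garbage-collected hashtable by source address and counts in nanoseconds):

#include <stdbool.h>
#include <stdint.h>

enum {
        PACKETS_PER_SECOND = 20, /* illustrative value */
        PACKETS_BURSTABLE = 5,   /* illustrative value */
        MSECS_PER_TOKEN = 1000 / PACKETS_PER_SECOND
};

struct bucket {
        uint64_t last_ms; /* time of the last refill */
        uint32_t tokens;  /* sends currently permitted */
};

static bool bucket_allow(struct bucket *b, uint64_t now_ms)
{
        uint64_t refill = (now_ms - b->last_ms) / MSECS_PER_TOKEN;

        if (refill) {
                b->tokens += refill < PACKETS_BURSTABLE ?
                             (uint32_t)refill : PACKETS_BURSTABLE;
                if (b->tokens > PACKETS_BURSTABLE)
                        b->tokens = PACKETS_BURSTABLE;
                b->last_ms += refill * MSECS_PER_TOKEN;
        }
        if (!b->tokens)
                return false;
        b->tokens--;
        return true;
}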
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
new file mode 100644
index 000000000000..c13260563446
--- /dev/null
+++ b/drivers/net/wireguard/send.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "messages.h"
+#include "cookie.h"
+
+#include <linux/uio.h>
+#include <linux/inetdevice.h>
+#include <linux/socket.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/sock.h>
+
+static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
+{
+ struct message_handshake_initiation packet;
+
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT))
+ return; /* This function is rate limited. */
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
+ HANDSHAKE_DSCP);
+ wg_timers_handshake_initiated(peer);
+ }
+}
+
+void wg_packet_handshake_send_worker(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ transmit_handshake_work);
+
+ wg_packet_send_handshake_initiation(peer);
+ wg_peer_put(peer);
+}
+
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry)
+{
+ if (!is_retry)
+ peer->timer_handshake_attempts = 0;
+
+ rcu_read_lock_bh();
+ /* We check last_sent_handshake here in addition to the actual function
+ * we're queueing up, so that we don't queue things if not strictly
+ * necessary:
+ */
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT) ||
+ unlikely(READ_ONCE(peer->is_dead)))
+ goto out;
+
+ wg_peer_get(peer);
+ /* Queues up calling packet_send_queued_handshakes(peer), where we do a
+ * peer_put(peer) after:
+ */
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->transmit_handshake_work))
+ /* If the work was already queued, we want to drop the
+ * extra reference:
+ */
+ wg_peer_put(peer);
+out:
+ rcu_read_unlock_bh();
+}
+
+void wg_packet_send_handshake_response(struct wg_peer *peer)
+{
+ struct message_handshake_response packet;
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet,
+ sizeof(packet),
+ HANDSHAKE_DSCP);
+ }
+ }
+}
+
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index)
+{
+ struct message_handshake_cookie packet;
+
+ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
+ wg->dev->name, initiating_skb);
+ wg_cookie_message_create(&packet, initiating_skb, sender_index,
+ &wg->cookie_checker);
+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
+ sizeof(packet));
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ (unlikely(atomic64_read(&keypair->sending.counter.counter) >
+ REKEY_AFTER_MESSAGES) ||
+ (keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REKEY_AFTER_TIME)))))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send)
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
+
+static unsigned int calculate_skb_padding(struct sk_buff *skb)
+{
+ /* We do this modulo business with the MTU, just in case the networking
+ * layer gives us a packet that's bigger than the MTU. In that case, we
+ * wouldn't want the final subtraction to wrap around when padded_size
+ * gets clamped.
+ */
+ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+
+ if (padded_size > PACKET_CB(skb)->mtu)
+ padded_size = PACKET_CB(skb)->mtu;
+ return padded_size - last_unit;
+}
+
+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
+{
+ unsigned int padding_len, plaintext_len, trailer_len;
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct message_data *header;
+ struct sk_buff *trailer;
+ int num_frags;
+
+ /* Calculate lengths. */
+ padding_len = calculate_skb_padding(skb);
+ trailer_len = padding_len + noise_encrypted_len(0);
+ plaintext_len = skb->len + padding_len;
+
+ /* Expand data section to have room for padding and auth tag. */
+ num_frags = skb_cow_data(skb, trailer_len, &trailer);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ /* Set the padding to zeros, and make sure it and the auth tag are part
+ * of the skb.
+ */
+ memset(skb_tail_pointer(trailer), 0, padding_len);
+
+ /* Expand head section to have room for our header and the network
+ * stack's headers.
+ */
+ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
+ return false;
+
+ /* Finalize checksum calculation for the inner packet, if required. */
+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb)))
+ return false;
+
+ /* Only after checksumming can we safely add on the padding at the end
+ * and the header.
+ */
+ skb_set_inner_network_header(skb, 0);
+ header = (struct message_data *)skb_push(skb, sizeof(*header));
+ header->header.type = cpu_to_le32(MESSAGE_DATA);
+ header->key_idx = keypair->remote_index;
+ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
+ pskb_put(skb, trailer, trailer_len);
+
+ /* Now we can encrypt the scatter-gather segments. */
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
+ noise_encrypted_len(plaintext_len)) <= 0)
+ return false;
+ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ keypair->sending.key);
+}
+
+void wg_packet_send_keepalive(struct wg_peer *peer)
+{
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&peer->staged_packet_queue)) {
+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+ skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
+ skb->dev = peer->device->dev;
+ PACKET_CB(skb)->mtu = skb->dev->mtu;
+ skb_queue_tail(&peer->staged_packet_queue, skb);
+ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ }
+
+ wg_packet_send_staged_packets(peer);
+}
+
+static void wg_packet_create_data_done(struct sk_buff *first,
+ struct wg_peer *peer)
+{
+ struct sk_buff *skb, *next;
+ bool is_keepalive, data_sent = false;
+
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ skb_list_walk_safe(first, skb, next) {
+ is_keepalive = skb->len == message_data_len(0);
+ if (likely(!wg_socket_send_skb_to_peer(peer, skb,
+ PACKET_CB(skb)->ds) && !is_keepalive))
+ data_sent = true;
+ }
+
+ if (likely(data_sent))
+ wg_timers_data_sent(peer);
+
+ keep_key_fresh(peer);
+}
+
+void wg_packet_tx_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct crypt_queue,
+ work);
+ struct noise_keypair *keypair;
+ enum packet_state state;
+ struct sk_buff *first;
+ struct wg_peer *peer;
+
+ while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(first);
+ keypair = PACKET_CB(first)->keypair;
+
+ if (likely(state == PACKET_STATE_CRYPTED))
+ wg_packet_create_data_done(first, peer);
+ else
+ kfree_skb_list(first);
+
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ }
+}
+
+void wg_packet_encrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *first, *skb, *next;
+
+ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = PACKET_STATE_CRYPTED;
+
+ skb_list_walk_safe(first, skb, next) {
+ if (likely(encrypt_packet(skb,
+ PACKET_CB(first)->keypair))) {
+ wg_reset_packet(skb);
+ } else {
+ state = PACKET_STATE_DEAD;
+ break;
+ }
+ }
+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+ state);
+ }
+}
+
+static void wg_packet_create_data(struct sk_buff *first)
+{
+ struct wg_peer *peer = PACKET_PEER(first);
+ struct wg_device *wg = peer->device;
+ int ret = -EINVAL;
+
+ rcu_read_lock_bh();
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+ &peer->tx_queue, first,
+ wg->packet_crypt_wq,
+ &wg->encrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+ PACKET_STATE_DEAD);
+err:
+ rcu_read_unlock_bh();
+ if (likely(!ret || ret == -EPIPE))
+ return;
+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
+ wg_peer_put(peer);
+ kfree_skb_list(first);
+}
+
+void wg_packet_purge_staged_packets(struct wg_peer *peer)
+{
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+}
+
+void wg_packet_send_staged_packets(struct wg_peer *peer)
+{
+ struct noise_symmetric_key *key;
+ struct noise_keypair *keypair;
+ struct sk_buff_head packets;
+ struct sk_buff *skb;
+
+ /* Steal the current queue into our local one. */
+ __skb_queue_head_init(&packets);
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice_init(&peer->staged_packet_queue, &packets);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ if (unlikely(skb_queue_empty(&packets)))
+ return;
+
+ /* First we make sure we have a valid reference to a valid key. */
+ rcu_read_lock_bh();
+ keypair = wg_noise_keypair_get(
+ rcu_dereference_bh(peer->keypairs.current_keypair));
+ rcu_read_unlock_bh();
+ if (unlikely(!keypair))
+ goto out_nokey;
+ key = &keypair->sending;
+ if (unlikely(!READ_ONCE(key->is_valid)))
+ goto out_nokey;
+ if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ REJECT_AFTER_TIME)))
+ goto out_invalid;
+
+ /* After we know we have a somewhat valid key, we now try to assign
+ * nonces to all of the packets in the queue. If we can't assign nonces
+ * for all of them, we just consider it a failure and wait for the next
+ * handshake.
+ */
+ skb_queue_walk(&packets, skb) {
+ /* 0 for no outer TOS: no leak. TODO: at some later point, we
+ * might consider using flowi->tos as outer instead.
+ */
+ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
+ PACKET_CB(skb)->nonce =
+ atomic64_inc_return(&key->counter.counter) - 1;
+ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
+ goto out_invalid;
+ }
+
+ packets.prev->next = NULL;
+ wg_peer_get(keypair->entry.peer);
+ PACKET_CB(packets.next)->keypair = keypair;
+ wg_packet_create_data(packets.next);
+ return;
+
+out_invalid:
+ WRITE_ONCE(key->is_valid, false);
+out_nokey:
+ wg_noise_keypair_put(keypair, false);
+
+ /* We orphan the packets if we're waiting on a handshake, so that they
+ * don't block a socket's pool.
+ */
+ skb_queue_walk(&packets, skb)
+ skb_orphan(skb);
+ /* Then we put them back on the top of the queue. We're not too
+ * concerned about accidentally getting things a little out of order if
+ * packets are being added really fast, because this queue is only used
+ * before packets can even be sent, and it's small anyway.
+ */
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ /* If we're exiting because there's something wrong with the key, it
+ * means we should initiate a new handshake.
+ */
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
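
calculate_skb_padding() above rounds each message body up to MESSAGE_PADDING_MULTIPLE (16 bytes in the WireGuard message format) while clamping at the MTU, so the final subtraction cannot wrap. The rule in isolation, as a small self-contained sketch:

#include <stdio.h>

#define MESSAGE_PADDING_MULTIPLE 16

static unsigned int padding_len(unsigned int len, unsigned int mtu)
{
        unsigned int last_unit = len % mtu;
        unsigned int padded = (last_unit + MESSAGE_PADDING_MULTIPLE - 1) &
                              ~(MESSAGE_PADDING_MULTIPLE - 1);

        if (padded > mtu) /* clamp, mirroring the kernel function */
                padded = mtu;
        return padded - last_unit;
}

int main(void)
{
        printf("%u\n", padding_len(100, 1420));  /* 12: pads 100 up to 112 */
        printf("%u\n", padding_len(1419, 1420)); /* 1: clamped at the MTU */
        return 0;
}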
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
new file mode 100644
index 000000000000..262f3b5c819d
--- /dev/null
+++ b/drivers/net/wireguard/socket.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <linux/ctype.h>
+#include <linux/net.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/inetdevice.h>
+#include <net/udp_tunnel.h>
+#include <net/ipv6.h>
+
+static int send4(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+ struct flowi4 fl = {
+ .saddr = endpoint->src4.s_addr,
+ .daddr = endpoint->addr4.sin_addr.s_addr,
+ .fl4_dport = endpoint->addr4.sin_port,
+ .flowi4_mark = wg->fwmark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ struct rtable *rt = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock4);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl4_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+
+ if (!rt) {
+ security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
+ fl.saddr, RT_SCOPE_HOST))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
+ rt->dst.dev->ifindex != endpoint->src_if4)))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ if (!IS_ERR(rt))
+ ip_rt_put(rt);
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ }
+ if (unlikely(IS_ERR(rt))) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(rt->dst.dev == skb->dev)) {
+ ip_rt_put(rt);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int send6(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct flowi6 fl = {
+ .saddr = endpoint->src6,
+ .daddr = endpoint->addr6.sin6_addr,
+ .fl6_dport = endpoint->addr6.sin6_port,
+ .flowi6_mark = wg->fwmark,
+ .flowi6_oif = endpoint->addr6.sin6_scope_id,
+ .flowi6_proto = IPPROTO_UDP
+ /* TODO: addr->sin6_flowinfo */
+ };
+ struct dst_entry *dst = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock6);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl6_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+
+ if (!dst) {
+ security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ if (unlikely(!ipv6_addr_any(&fl.saddr) &&
+ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
+ endpoint->src6 = fl.saddr = in6addr_any;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
+ NULL);
+ if (unlikely(IS_ERR(dst))) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(dst->dev == skb->dev)) {
+ dst_release(dst);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+#else
+ return -EAFNOSUPPORT;
+#endif
+}
+
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
+{
+ size_t skb_len = skb->len;
+ int ret = -EAFNOSUPPORT;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ ret = send4(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ ret = send6(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else
+ dev_kfree_skb(skb);
+ if (likely(!ret))
+ peer->tx_bytes += skb_len;
+ read_unlock_bh(&peer->endpoint_lock);
+
+ return ret;
+}
+
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
+ size_t len, u8 ds)
+{
+ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+ return wg_socket_send_skb_to_peer(peer, skb, ds);
+}
+
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb, void *buffer,
+ size_t len)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ struct endpoint endpoint;
+
+ if (unlikely(!in_skb))
+ return -EINVAL;
+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
+ if (unlikely(ret < 0))
+ return ret;
+
+ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+
+ if (endpoint.addr.sa_family == AF_INET)
+ ret = send4(wg, skb, &endpoint, 0, NULL);
+ else if (endpoint.addr.sa_family == AF_INET6)
+ ret = send6(wg, skb, &endpoint, 0, NULL);
+ /* No other possibilities if the endpoint is valid, which it is,
+ * as we checked above.
+ */
+
+ return ret;
+}
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb)
+{
+ memset(endpoint, 0, sizeof(*endpoint));
+ if (skb->protocol == htons(ETH_P_IP)) {
+ endpoint->addr4.sin_family = AF_INET;
+ endpoint->addr4.sin_port = udp_hdr(skb)->source;
+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ endpoint->src4.s_addr = ip_hdr(skb)->daddr;
+ endpoint->src_if4 = skb->skb_iif;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ endpoint->addr6.sin6_family = AF_INET6;
+ endpoint->addr6.sin6_port = udp_hdr(skb)->source;
+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
+ &ipv6_hdr(skb)->saddr, skb->skb_iif);
+ endpoint->src6 = ipv6_hdr(skb)->daddr;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
+{
+ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
+ a->addr4.sin_port == b->addr4.sin_port &&
+ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
+ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
+ (a->addr.sa_family == AF_INET6 &&
+ b->addr.sa_family == AF_INET6 &&
+ a->addr6.sin6_port == b->addr6.sin6_port &&
+ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
+ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
+ ipv6_addr_equal(&a->src6, &b->src6)) ||
+ unlikely(!a->addr.sa_family && !b->addr.sa_family);
+}
+
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint)
+{
+ /* First we check unlocked, in order to optimize, since it's pretty rare
+ * that an endpoint will change. If we happen to be mid-write, and two
+ * CPUs wind up writing the same thing or something slightly different,
+ * it doesn't really matter much either.
+ */
+ if (endpoint_eq(endpoint, &peer->endpoint))
+ return;
+ write_lock_bh(&peer->endpoint_lock);
+ if (endpoint->addr.sa_family == AF_INET) {
+ peer->endpoint.addr4 = endpoint->addr4;
+ peer->endpoint.src4 = endpoint->src4;
+ peer->endpoint.src_if4 = endpoint->src_if4;
+ } else if (endpoint->addr.sa_family == AF_INET6) {
+ peer->endpoint.addr6 = endpoint->addr6;
+ peer->endpoint.src6 = endpoint->src6;
+ } else {
+ goto out;
+ }
+ dst_cache_reset(&peer->endpoint_cache);
+out:
+ write_unlock_bh(&peer->endpoint_lock);
+}
+
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb)
+{
+ struct endpoint endpoint;
+
+ if (!wg_socket_endpoint_from_skb(&endpoint, skb))
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+}
+
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
+{
+ write_lock_bh(&peer->endpoint_lock);
+ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
+ dst_cache_reset(&peer->endpoint_cache);
+ write_unlock_bh(&peer->endpoint_lock);
+}
+
+static int wg_receive(struct sock *sk, struct sk_buff *skb)
+{
+ struct wg_device *wg;
+
+ if (unlikely(!sk))
+ goto err;
+ wg = sk->sk_user_data;
+ if (unlikely(!wg))
+ goto err;
+ skb_mark_not_on_list(skb);
+ wg_packet_receive(wg, skb);
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void sock_free(struct sock *sock)
+{
+ if (unlikely(!sock))
+ return;
+ sk_clear_memalloc(sock);
+ udp_tunnel_sock_release(sock->sk_socket);
+}
+
+static void set_sock_opts(struct socket *sock)
+{
+ sock->sk->sk_allocation = GFP_ATOMIC;
+ sock->sk->sk_sndbuf = INT_MAX;
+ sk_set_memalloc(sock->sk);
+}
+
+int wg_socket_init(struct wg_device *wg, u16 port)
+{
+ int ret;
+ struct udp_tunnel_sock_cfg cfg = {
+ .sk_user_data = wg,
+ .encap_type = 1,
+ .encap_rcv = wg_receive
+ };
+ struct socket *new4 = NULL, *new6 = NULL;
+ struct udp_port_cfg port4 = {
+ .family = AF_INET,
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .local_udp_port = htons(port),
+ .use_udp_checksums = true
+ };
+#if IS_ENABLED(CONFIG_IPV6)
+ int retries = 0;
+ struct udp_port_cfg port6 = {
+ .family = AF_INET6,
+ .local_ip6 = IN6ADDR_ANY_INIT,
+ .use_udp6_tx_checksums = true,
+ .use_udp6_rx_checksums = true,
+ .ipv6_v6only = true
+ };
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+retry:
+#endif
+
+ ret = udp_sock_create(wg->creating_net, &port4, &new4);
+ if (ret < 0) {
+ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new4);
+ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6_mod_enabled()) {
+ port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
+ ret = udp_sock_create(wg->creating_net, &port6, &new6);
+ if (ret < 0) {
+ udp_tunnel_sock_release(new4);
+ if (ret == -EADDRINUSE && !port && retries++ < 100)
+ goto retry;
+ pr_err("%s: Could not create IPv6 socket\n",
+ wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new6);
+ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+ }
+#endif
+
+ wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
+ return 0;
+}
+
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6)
+{
+ struct sock *old4, *old6;
+
+ mutex_lock(&wg->socket_update_lock);
+ old4 = rcu_dereference_protected(wg->sock4,
+ lockdep_is_held(&wg->socket_update_lock));
+ old6 = rcu_dereference_protected(wg->sock6,
+ lockdep_is_held(&wg->socket_update_lock));
+ rcu_assign_pointer(wg->sock4, new4);
+ rcu_assign_pointer(wg->sock6, new6);
+ if (new4)
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
+ mutex_unlock(&wg->socket_update_lock);
+ synchronize_rcu();
+ synchronize_net();
+ sock_free(old4);
+ sock_free(old6);
+}
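
wg_socket_init() binds the IPv4 socket first and then tries to claim the same (possibly ephemeral) port for IPv6, retrying the whole sequence up to 100 times if another process grabs the port in between. A userspace analogue of that dance with plain BSD sockets (error checks on socket() elided for brevity; the helper name is this sketch's own):

#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int bind_same_port(int *fd4, int *fd6)
{
        struct sockaddr_in a4 = { .sin_family = AF_INET };
        struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };
        socklen_t len = sizeof(a4);
        int one = 1, saved, tries;

        for (tries = 0; tries < 100; ++tries) {
                *fd4 = socket(AF_INET, SOCK_DGRAM, 0);
                if (bind(*fd4, (struct sockaddr *)&a4, sizeof(a4)) ||
                    getsockname(*fd4, (struct sockaddr *)&a4, &len)) {
                        close(*fd4);
                        return -1;
                }
                *fd6 = socket(AF_INET6, SOCK_DGRAM, 0);
                setsockopt(*fd6, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one));
                a6.sin6_port = a4.sin_port; /* mirror the v4 port on v6 */
                if (!bind(*fd6, (struct sockaddr *)&a6, sizeof(a6)))
                        return 0; /* both sockets now share one port */
                saved = errno;
                close(*fd4);
                close(*fd6);
                if (saved != EADDRINUSE)
                        return -1; /* only a port collision is retried */
                a4.sin_port = 0; /* ask for a fresh ephemeral port */
        }
        return -1;
}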
diff --git a/drivers/net/wireguard/socket.h b/drivers/net/wireguard/socket.h
new file mode 100644
index 000000000000..bab5848efbcd
--- /dev/null
+++ b/drivers/net/wireguard/socket.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_SOCKET_H
+#define _WG_SOCKET_H
+
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+
+int wg_socket_init(struct wg_device *wg, u16 port);
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6);
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data,
+ size_t len, u8 ds);
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb,
+ u8 ds);
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb,
+ void *out_buffer, size_t len);
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb);
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint);
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb);
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer);
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \
+ struct endpoint __endpoint; \
+ wg_socket_endpoint_from_skb(&__endpoint, skb); \
+ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...)
+#endif
+
+#endif /* _WG_SOCKET_H */
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
new file mode 100644
index 000000000000..d54d32ac9bc4
--- /dev/null
+++ b/drivers/net/wireguard/timers.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "queueing.h"
+#include "socket.h"
+
+/*
+ * - Timer for retransmitting the handshake if we don't hear back after
+ * `REKEY_TIMEOUT + jitter` ms.
+ *
+ * - Timer for sending an empty packet if we have received a packet but have
+ * not sent one in return for `KEEPALIVE_TIMEOUT` ms.
+ *
+ * - Timer for initiating a new handshake if we have sent a packet but have
+ * not received one back (not even an empty one) for
+ * `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + jitter` ms.
+ *
+ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms
+ * if no new keys have been received.
+ *
+ * - Timer for sending an empty authenticated packet at a user-specified
+ * interval, if persistent keepalives are enabled.
+ */
+
+static inline void mod_peer_timer(struct wg_peer *peer,
+ struct timer_list *timer,
+ unsigned long expires)
+{
+ rcu_read_lock_bh();
+ if (likely(netif_running(peer->device->dev) &&
+ !READ_ONCE(peer->is_dead)))
+ mod_timer(timer, expires);
+ rcu_read_unlock_bh();
+}
+
+static void wg_expired_retransmit_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_retransmit_handshake);
+
+ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+
+ del_timer(&peer->timer_send_keepalive);
+ /* We drop all packets without a keypair and don't try again,
+ * if we try unsuccessfully for too long to make a handshake.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* We set a timer for destroying any residue that might be left
+ * of a partial exchange.
+ */
+ if (!timer_pending(&peer->timer_zero_key_material))
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+ } else {
+ ++peer->timer_handshake_attempts;
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REKEY_TIMEOUT,
+ peer->timer_handshake_attempts + 1);
+
+ /* We clear the endpoint's source address, in case this is
+ * the cause of trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+
+ wg_packet_send_queued_handshake_initiation(peer, true);
+ }
+}
+
+static void wg_expired_send_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
+
+ wg_packet_send_keepalive(peer);
+ if (peer->timer_need_another_keepalive) {
+ peer->timer_need_another_keepalive = false;
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ }
+}
+
+static void wg_expired_new_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
+
+ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+ /* We clear the endpoint's source address, in case this is the cause
+ * of trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
+
+static void wg_expired_zero_key_material(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
+
+ rcu_read_lock_bh();
+ if (!READ_ONCE(peer->is_dead)) {
+ wg_peer_get(peer);
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->clear_peer_work))
+ /* If the work was already on the queue, we want to drop
+ * the extra reference.
+ */
+ wg_peer_put(peer);
+ }
+ rcu_read_unlock_bh();
+}
+
+static void wg_queued_expired_zero_key_material(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ clear_peer_work);
+
+ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_peer_put(peer);
+}
+
+static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_persistent_keepalive);
+
+ if (likely(peer->persistent_keepalive_interval))
+ wg_packet_send_keepalive(peer);
+}
+
+/* Should be called after an authenticated data packet is sent. */
+void wg_timers_data_sent(struct wg_peer *peer)
+{
+ if (!timer_pending(&peer->timer_new_handshake))
+ mod_peer_timer(peer, &peer->timer_new_handshake,
+ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after an authenticated data packet is received. */
+void wg_timers_data_received(struct wg_peer *peer)
+{
+ if (likely(netif_running(peer->device->dev))) {
+ if (!timer_pending(&peer->timer_send_keepalive))
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ else
+ peer->timer_need_another_keepalive = true;
+ }
+}
+
+/* Should be called after any type of authenticated packet is sent, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_send_keepalive);
+}
+
+/* Should be called after any type of authenticated packet is received, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_new_handshake);
+}
+
+/* Should be called after a handshake initiation message is sent. */
+void wg_timers_handshake_initiated(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_retransmit_handshake,
+ jiffies + REKEY_TIMEOUT * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after a handshake response message is received and processed
+ * or when getting key confirmation via the first data message.
+ */
+void wg_timers_handshake_complete(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_retransmit_handshake);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ ktime_get_real_ts64(&peer->walltime_last_handshake);
+}
+
+/* Should be called after an ephemeral key is created, which is before sending a
+ * handshake response or after receiving a handshake response.
+ */
+void wg_timers_session_derived(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+}
+
+/* Should be called before a packet with authentication, whether
+ * keepalive, data, or handshake, is sent, or after one is received.
+ */
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer)
+{
+ if (peer->persistent_keepalive_interval)
+ mod_peer_timer(peer, &peer->timer_persistent_keepalive,
+ jiffies + peer->persistent_keepalive_interval * HZ);
+}
+
+void wg_timers_init(struct wg_peer *peer)
+{
+ timer_setup(&peer->timer_retransmit_handshake,
+ wg_expired_retransmit_handshake, 0);
+ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0);
+ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0);
+ timer_setup(&peer->timer_zero_key_material,
+ wg_expired_zero_key_material, 0);
+ timer_setup(&peer->timer_persistent_keepalive,
+ wg_expired_send_persistent_keepalive, 0);
+ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ peer->timer_need_another_keepalive = false;
+}
+
+void wg_timers_stop(struct wg_peer *peer)
+{
+ del_timer_sync(&peer->timer_retransmit_handshake);
+ del_timer_sync(&peer->timer_send_keepalive);
+ del_timer_sync(&peer->timer_new_handshake);
+ del_timer_sync(&peer->timer_zero_key_material);
+ del_timer_sync(&peer->timer_persistent_keepalive);
+ flush_work(&peer->clear_peer_work);
+}
diff --git a/drivers/net/wireguard/timers.h b/drivers/net/wireguard/timers.h
new file mode 100644
index 000000000000..f0653dcb1326
--- /dev/null
+++ b/drivers/net/wireguard/timers.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_TIMERS_H
+#define _WG_TIMERS_H
+
+#include <linux/ktime.h>
+
+struct wg_peer;
+
+void wg_timers_init(struct wg_peer *peer);
+void wg_timers_stop(struct wg_peer *peer);
+void wg_timers_data_sent(struct wg_peer *peer);
+void wg_timers_data_received(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer);
+void wg_timers_handshake_initiated(struct wg_peer *peer);
+void wg_timers_handshake_complete(struct wg_peer *peer);
+void wg_timers_session_derived(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer);
+
+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds,
+ u64 expiration_seconds)
+{
+ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
+ <= (s64)ktime_get_coarse_boottime_ns();
+}
+
+#endif /* _WG_TIMERS_H */
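
wg_birthdate_has_expired() above compares against the coarse boottime clock, so time spent suspended counts toward key expiry. The same arithmetic in userspace, using CLOCK_BOOTTIME as the closest analogue (the signed casts keep the comparison sane if the sum wraps):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t boottime_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_BOOTTIME, &ts);
        return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static bool birthdate_has_expired(uint64_t birthdate_ns, uint64_t expiry_sec)
{
        return (int64_t)(birthdate_ns + expiry_sec * NSEC_PER_SEC) <=
               (int64_t)boottime_ns();
}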
diff --git a/drivers/net/wireguard/version.h b/drivers/net/wireguard/version.h
new file mode 100644
index 000000000000..a1a269a11634
--- /dev/null
+++ b/drivers/net/wireguard/version.h
@@ -0,0 +1 @@
+#define WIREGUARD_VERSION "1.0.0"
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 7b90b8546162..b10972b6cba4 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -62,5 +62,6 @@ source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
+source "drivers/net/wireless/ath/ath11k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index ee2b2431e5a3..8e4ae9de5ae4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
+obj-$(CONFIG_ATH11K) += ath11k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index da2d179430ca..49cc4b7ed516 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -74,7 +74,7 @@ static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
if (cmd->odata) {
if (cmd->olen < olen) {
- ar5523_err(ar, "olen to small %d < %d\n",
+ ar5523_err(ar, "olen too small %d < %d\n",
cmd->olen, olen);
cmd->olen = 0;
cmd->res = -EOVERFLOW;
@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
+ SMCWUSBT-G2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index f80854180e21..ed87bc00f2aa 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->mem_len = resource_size(res);
- ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
@@ -466,7 +466,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
goto err_mem_unmap;
}
- ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 95dc4be82e5c..ea908107581d 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -346,6 +346,52 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
return 0;
}
+static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd *cmd;
+ u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
+ u32 txlen;
+ int ret;
+ size_t buf_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
+ cmd = kzalloc(buf_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ while (length) {
+ txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd->id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd->lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd->lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ kfree(cmd);
+
+ return 0;
+}
+
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
@@ -430,7 +476,11 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
- ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ar->hw_params.bmi_large_size_download)
+ ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
+ else
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index ef3bdba43bed..f6fadcbdd86e 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -45,6 +45,15 @@
sizeof(u32) + \
sizeof(u32))
+/* Maximum data size used for large BMI transfers */
+#define BMI_MAX_LARGE_DATA_SIZE 2048
+
+/* len = cmd + addr + length */
+#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
/* BMI Commands */
enum bmi_cmd_id {
@@ -258,6 +267,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
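
The large-transfer path added above always carves the firmware image into chunks of at most BMI_MAX_LARGE_DATA_SIZE minus the command header. The loop's shape, reduced to a pure function (send_chunk stands in for ath10k_hif_exchange_bmi_msg() and is an assumption of this sketch):

#include <stddef.h>
#include <stdint.h>

#define BMI_MAX_LARGE_DATA_SIZE 2048

typedef int (*send_fn)(const void *chunk, size_t len);

static int send_chunked(const uint8_t *buf, size_t length, size_t hdrlen,
                        send_fn send_chunk)
{
        while (length) {
                size_t txlen = length;
                int ret;

                if (txlen > BMI_MAX_LARGE_DATA_SIZE - hdrlen)
                        txlen = BMI_MAX_LARGE_DATA_SIZE - hdrlen;

                ret = send_chunk(buf, txlen);
                if (ret)
                        return ret; /* caller frees any command buffer */

                buf += txlen;
                length -= txlen;
        }
        return 0;
}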
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 4f76ba5d78a9..5ec16ce19b69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -189,6 +189,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
+ .bmi_large_size_download = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -714,18 +715,6 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
- /* Explicitly set fwlog prints to zero as target may turn it on
- * based on scratch registers.
- */
- ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
- if (ret)
- return ret;
-
- param |= HI_OPTION_DISABLE_DBGLOG;
- ret = ath10k_bmi_write32(ar, hi_option_flag, param);
- if (ret)
- return ret;
-
return 0;
}
@@ -3231,6 +3220,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_waitqueue_head(&ar->htt.empty_tx_wq);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
+ skb_queue_head_init(&ar->htt.rx_indication_head);
+
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index af68eb5d0776..5101bf2b5b15 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -124,6 +124,7 @@ struct ath10k_skb_cb {
struct ath10k_skb_rxcb {
dma_addr_t paddr;
struct hlist_node hlist;
+ u8 eid;
};
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -1180,6 +1181,7 @@ struct ath10k {
struct {
/* protected by data_lock */
+ u32 rx_crc_err_drop;
u32 fw_crash_counter;
u32 fw_warm_reset_counter;
u32 fw_cold_reset_counter;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 04c50a26a4f4..e000677ac516 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1094,6 +1094,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_rts_good",
"d_tx_power", /* in .5 dbM I think */
"d_rx_crc_err", /* fcs_bad */
+ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */
"d_no_beacon",
"d_tx_mpdus_queued",
"d_tx_msdu_queued",
@@ -1193,6 +1194,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->rts_good;
data[i++] = pdev_stats->chan_tx_power;
data[i++] = pdev_stats->fcs_bad;
+ data[i++] = ar->stats.rx_crc_err_drop;
data[i++] = pdev_stats->no_beacons;
data[i++] = pdev_stats->mpdu_enqued;
data[i++] = pdev_stats->msdu_enqued;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 1d4d1a1992fe..2248d6c022f4 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -270,7 +270,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -800,8 +800,8 @@ setup:
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ep->service_id);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
+ ep->service_id);
return status;
}
@@ -878,8 +878,8 @@ static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
&ul_pipe_id,
&dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index f55d3caec61f..065c82d9d689 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
+#include <linux/bitfield.h>
struct ath10k;
@@ -39,7 +40,7 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -49,9 +50,27 @@ enum ath10k_htc_tx_flags {
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
- ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
+#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4)
+
+/* bits 2-3 are for extra bundle count bits 4-5 */
+#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2)
+#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4
+
+static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags)
+{
+ unsigned int count, extra_count = 0;
+
+ count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
+
+ if (max_msgs > 16)
+ extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) <<
+ ATH10K_HTC_BUNDLE_EXTRA_SHIFT;
+
+ return count + extra_count;
+}
+
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
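
ath10k_htc_get_bundle_count() above widens the old four-bit bundle count: bits 7:4 of the flags byte still carry the low four bits, and on targets that support more than 16 messages per bundle, bits 3:2 contribute count bits 5:4. A worked decode with plain shifts and masks:

#include <assert.h>

static unsigned int bundle_count(unsigned char max_msgs, unsigned char flags)
{
        unsigned int count = (flags >> 4) & 0xf;    /* bits 7:4 */

        if (max_msgs > 16)
                count += ((flags >> 2) & 0x3) << 4; /* bits 3:2 -> 5:4 */
        return count;
}

int main(void)
{
        assert(bundle_count(8, 0x50) == 5);   /* legacy four-bit count */
        assert(bundle_count(32, 0x54) == 21); /* 5 + (1 << 4) */
        return 0;
}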
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 30c080094af1..4a12564fc30e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1869,6 +1869,8 @@ struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
+ struct sk_buff_head rx_indication_head;
+
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
@@ -2283,6 +2285,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d95b63f133ab..38a5814cf345 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1285,6 +1285,13 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
status = IEEE80211_SKB_RXCB(skb);
+ if (!(ar->filter_flags & FIF_FCSFAIL) &&
+ status->flag & RX_FLAG_FAILED_FCS_CRC) {
+ ar->stats.rx_crc_err_drop++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
@@ -2133,7 +2140,7 @@ static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
if (last_pn_valid)
pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
else
- peer->tids_last_pn_valid[tid] = 1;
+ peer->tids_last_pn_valid[tid] = true;
if (!pn_invalid)
last_pn->pn48 = new_pn.pn48;
@@ -2196,8 +2203,8 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
HTT_RX_IND_MPDU_STATUS_OK &&
mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
- ath10k_warn(ar, "MPDU range status: %d\n",
- mpdu_ranges->mpdu_range_status);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
+ mpdu_ranges->mpdu_range_status);
goto err;
}
@@ -2235,8 +2242,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_is_data_qos(hdr->frame_control);
+
rx_status = IEEE80211_SKB_RXCB(skb);
- rx_status->chains |= BIT(0);
+ memset(rx_status, 0, sizeof(*rx_status));
+
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
@@ -2350,7 +2359,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
}
- ieee80211_rx_ni(ar->hw, skb);
+ if (ar->napi.dev)
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+ else
+ ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
@@ -3751,14 +3763,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
- return ath10k_htt_rx_proc_rx_ind_hl(htt,
- &resp->rx_ind_hl,
- skb,
- HTT_RX_PN_CHECK,
- HTT_RX_NON_TKIP_MIC);
- else
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
+ } else {
+ skb_queue_tail(&htt->rx_indication_head, skb);
+ return false;
+ }
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@@ -3948,6 +3958,37 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
return quota;
}
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
+{
+ struct htt_resp *resp;
+ struct ath10k_htt *htt = &ar->htt;
+ struct sk_buff *skb;
+ bool release;
+ int quota;
+
+ for (quota = 0; quota < budget; quota++) {
+ skb = skb_dequeue(&htt->rx_indication_head);
+ if (!skb)
+ break;
+
+ resp = (struct htt_resp *)skb->data;
+
+ release = ath10k_htt_rx_proc_rx_ind_hl(htt,
+ &resp->rx_ind_hl,
+ skb,
+ HTT_RX_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+
+ if (release)
+ dev_kfree_skb_any(skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
+ skb_queue_len(&htt->rx_indication_head));
+ }
+ return quota;
+}
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
+
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35a362329a4f..775fd62fb92d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -613,6 +613,9 @@ struct ath10k_hw_params {
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
+ /* target supporting fw download via large size BMI */
+ bool bmi_large_size_download;
+
/* need to set uart pin if disable uart print, workaround for a
* firmware bug
*/
@@ -813,7 +816,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
#define TARGET_10_4_NUM_WDS_ENTRIES 32
-#define TARGET_10_4_DMA_BURST_SIZE 0
+#define TARGET_10_4_DMA_BURST_SIZE 1
#define TARGET_10_4_MAC_AGGR_DELIM 0
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 978f0037ed52..7fee35ff966b 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1098,7 +1098,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
+ ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
@@ -6329,6 +6329,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
ar->wmi.peer_param->authorize, 1);
+ else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -8908,6 +8911,7 @@ int ath10k_mac_register(struct ath10k *ar)
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bb44f5a0941b..ded7a220a4aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1578,7 +1578,7 @@ static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
return 0;
}
-/* if an error happened returns < 0, otherwise the length */
+/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
const struct ath10k_mem_region *region,
u8 *buf)
@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
+ ret = -EIO;
+ goto done;
+ }
for (i = 0; i < region->len; i += 4)
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
- return region->len;
+ ret = region->len;
+done:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
/* if an error happened returns < 0, otherwise the length */
@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
break;
case ATH10K_MEM_REGION_TYPE_IOREG:
- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
break;
default:
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index a0ba07b85362..85dce43c5439 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -84,6 +84,9 @@ static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
int ret;
int i;
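+ /* platforms with fixed MSA permissions need no mapping */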
+ if (qmi->msa_fixed_perm)
+ return 0;
+
for (i = 0; i < qmi->nr_mem_region; i++) {
ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
if (ret)
@@ -102,6 +105,9 @@ static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
int i;
+ if (qmi->msa_fixed_perm)
+ return;
+
for (i = 0; i < qmi->nr_mem_region; i++)
ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}
@@ -279,7 +285,15 @@ static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* end = 1 triggers a CRC check on the BDF. If this fails, we
+ * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
+ * willing to use the BDF. For some platforms, all the valid
+ * released BDFs fail this CRC check, so attempt to detect this
+ * scenario and treat it as non-fatal.
+ */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ !(req->end == 1 &&
+ resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
ath10k_err(ar, "failed to download board data file: %d\n",
resp.resp.error);
ret = -EINVAL;
@@ -635,7 +649,9 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* older FW didn't support this request, which is not fatal */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1025,6 +1041,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
qmi->msa_mem_size = msa_size;
}
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
+
ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
&qmi->msa_pa,
qmi->msa_va);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 40aafb875ed0..dc257375f161 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -104,6 +104,7 @@ struct ath10k_qmi {
bool fw_ready;
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+ bool msa_fixed_perm;
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 120200a93bcc..e5316b911e1d 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -24,6 +24,8 @@
#include "trace.h"
#include "sdio.h"
+#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
+
/* inlined helper functions */
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
@@ -417,6 +419,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_sdio_rx_data *pkt;
struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
@@ -462,10 +465,16 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
if (ret)
goto out;
- if (!pkt->trailer_only)
- ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
- else
+ if (!pkt->trailer_only) {
+ cb = ATH10K_SKB_RXCB(pkt->skb);
+ cb->eid = id;
+
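+ /* defer delivery to the rx workqueue so the sdio bus can keep fetching */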
+ skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
+ queue_work(ar->workqueue_aux,
+ &ar_sdio->async_work_rx);
+ } else {
kfree_skb(pkt->skb);
+ }
/* The RX complete handler now owns the skb...*/
pkt->skb = NULL;
@@ -484,21 +493,22 @@ out:
return ret;
}
-static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
- struct ath10k_sdio_rx_data *rx_pkts,
- struct ath10k_htc_hdr *htc_hdr,
- size_t full_len, size_t act_len,
- size_t *bndl_cnt)
+static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
{
int ret, i;
+ u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
- *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+ *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
+ if (*bndl_cnt > max_msgs) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ max_msgs);
return -ENOMEM;
}
@@ -529,12 +539,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
size_t full_len, act_len;
bool last_in_bundle;
int ret, i;
+ int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar,
- "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
- n_lookaheads,
- ATH10K_SDIO_MAX_RX_MSGS);
+ ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
}
@@ -543,10 +552,8 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
last_in_bundle = false;
- if (le16_to_cpu(htc_hdr->len) >
- ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
- ath10k_warn(ar,
- "payload length %d exceeds max htc length: %zu\n",
+ if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
@@ -557,36 +564,37 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
- ath10k_warn(ar,
- "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->len));
ret = -EINVAL;
goto err;
}
- if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ if (ath10k_htc_get_bundle_count(
+ ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
/* HTC header indicates that every packet to follow
* has the same padded length so that it can be
* optimally fetched as a full bundle.
*/
size_t bndl_cnt;
- ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
- &ar_sdio->rx_pkts[i],
- htc_hdr,
- full_len,
- act_len,
- &bndl_cnt);
+ ret = ath10k_sdio_mbox_alloc_bundle(ar,
+ &ar_sdio->rx_pkts[pkt_cnt],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
if (ret) {
- ath10k_warn(ar, "alloc_bundle error %d\n", ret);
+ ath10k_warn(ar, "failed to allocate a bundle: %d\n",
+ ret);
goto err;
}
- n_lookaheads += bndl_cnt;
- i += bndl_cnt;
- /*Next buffer will be the last in the bundle */
+ pkt_cnt += bndl_cnt;
+
+ /* next buffer will be the last in the bundle */
last_in_bundle = true;
}
@@ -597,7 +605,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
- ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
act_len,
full_len,
last_in_bundle,
@@ -606,9 +614,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
goto err;
}
+
+ pkt_cnt++;
}
- ar_sdio->n_rx_pkts = i;
+ ar_sdio->n_rx_pkts = pkt_cnt;
return 0;
@@ -622,10 +632,10 @@ err:
return ret;
}
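+/* Fetch a single rx packet straight into its skb */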
-static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
- struct ath10k_sdio_rx_data *pkt)
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
struct sk_buff *skb = pkt->skb;
struct ath10k_htc_hdr *htc_hdr;
int ret;
@@ -633,48 +643,75 @@ static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
skb->data, pkt->alloc_len);
if (ret)
- goto out;
+ goto err;
- /* Update actual length. The original length may be incorrect,
- * as the FW will bundle multiple packets as long as their sizes
- * fit within the same aligned length (pkt->alloc_len).
- */
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
if (pkt->act_len > pkt->alloc_len) {
- ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
- pkt->act_len, pkt->alloc_len);
- ret = -EMSGSIZE;
- goto out;
+ ret = -EINVAL;
+ goto err;
}
skb_put(skb, pkt->act_len);
+ return 0;
-out:
- pkt->status = ret;
+err:
+ ar_sdio->n_rx_pkts = 0;
+ ath10k_sdio_mbox_free_rx_pkt(pkt);
return ret;
}
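+/* Fetch all pending rx packets with one sdio read into vsg_buffer and
+ * then copy each packet into its own skb.
+ */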
-static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_hdr *htc_hdr;
int ret, i;
+ u32 pkt_offset, virt_pkt_len;
+
+ virt_pkt_len = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
+
+ if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
+ ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
+ ret = -E2BIG;
+ goto err;
+ }
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ ar_sdio->vsg_buffer, virt_pkt_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read bundle packets: %d", ret);
+ goto err;
+ }
+
+ pkt_offset = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
- ret = ath10k_sdio_mbox_rx_packet(ar,
- &ar_sdio->rx_pkts[i]);
- if (ret)
+ pkt = &ar_sdio->rx_pkts[i];
+ htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+ if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
goto err;
+ }
+
+ skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
+ pkt_offset += pkt->alloc_len;
}
return 0;
err:
/* Free all packets that were not successfully fetched. */
- for (; i < ar_sdio->n_rx_pkts; i++)
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ ar_sdio->n_rx_pkts = 0;
+
return ret;
}
@@ -717,7 +754,10 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
*/
*done = false;
- ret = ath10k_sdio_mbox_rx_fetch(ar);
+ if (ar_sdio->n_rx_pkts > 1)
+ ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
+ else
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
/* Process fetched packets. This will potentially update
* n_lookaheads depending on if the packets contain lookahead
@@ -1293,6 +1333,31 @@ static void __ath10k_sdio_write_async(struct ath10k *ar,
ath10k_sdio_free_bus_req(ar, req);
}
+/* To improve throughput, use a workqueue to deliver packets to the HTC
+ * layer; this way the SDIO bus is utilised much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ async_work_rx);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&ar_sdio->rx_head);
+ if (!skb)
+ break;
+ cb = ATH10K_SKB_RXCB(skb);
+ ep = &ar->htc.endpoint[cb->eid];
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ }
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ napi_schedule(&ar->napi);
+}
+
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
@@ -1681,6 +1746,8 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
int ret;
+ napi_enable(&ar->napi);
+
/* Sleep 20 ms before HIF interrupts are disabled.
* This will give target plenty of time to process the BMI done
* request before interrupts are disabled.
@@ -1805,13 +1872,16 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
}
#ifdef CONFIG_PM
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
- return -EOPNOTSUPP;
+ return 0;
}
static int ath10k_sdio_hif_resume(struct ath10k *ar)
@@ -1961,7 +2031,26 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
*/
static int ath10k_sdio_pm_suspend(struct device *device)
{
- return 0;
+ struct sdio_func *func = dev_to_sdio_func(device);
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ mmc_pm_flag_t pm_flag, pm_caps;
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return 0;
+
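+ /* ask the host to keep the card powered across suspend */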
+ pm_flag = MMC_PM_KEEP_POWER;
+
+ ret = sdio_set_host_pm_flags(func, pm_flag);
+ if (ret) {
+ pm_caps = sdio_get_host_pm_caps(func);
+ ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+ pm_flag, pm_caps, ret);
+ return ret;
+ }
+
+ return ret;
}
static int ath10k_sdio_pm_resume(struct device *device)
@@ -1980,6 +2069,20 @@ static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
#endif /* CONFIG_PM_SLEEP */
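+/* NAPI poll: deliver queued HL rx indications to mac80211, up to @budget */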
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -2005,6 +2108,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
+ ATH10K_NAPI_BUDGET);
+
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
@@ -2020,6 +2126,12 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
+ ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->vsg_buffer) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
ar_sdio->irq_data.irq_en_reg =
devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
GFP_KERNEL);
@@ -2028,7 +2140,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
- ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
if (!ar_sdio->bmi_buf) {
ret = -ENOMEM;
goto err_core_destroy;
@@ -2057,6 +2169,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+ skb_queue_head_init(&ar_sdio->rx_head);
+ INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
+
dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
switch (dev_id_base) {
case QCA_MANUFACTURER_ID_AR6005_BASE:
@@ -2080,6 +2195,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
bus_params.chip_id = 0;
bus_params.hl_msdu_ids = true;
+ ar->hw->max_mtu = ETH_DATA_LEN;
+
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2106,6 +2223,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
func->num, func->vendor, func->device);
ath10k_core_unregister(ar);
+
+ netif_napi_del(&ar->napi);
+
ath10k_core_destroy(ar);
flush_workqueue(ar_sdio->workqueue);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index b8c7ac0330bd..33195f49acab 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -89,10 +89,10 @@
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
@@ -126,7 +126,6 @@ struct ath10k_sdio_rx_data {
bool part_of_bundle;
bool last_in_bundle;
bool trailer_only;
- int status;
};
struct ath10k_sdio_irq_proc_regs {
@@ -138,8 +137,8 @@ struct ath10k_sdio_irq_proc_regs {
u8 rx_lookahead_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
- __le32 rx_lookahead[2];
- __le32 rx_gmbox_lookahead_alias[2];
+ __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
+ __le32 int_status_enable;
};
struct ath10k_sdio_irq_enable_regs {
@@ -187,6 +186,9 @@ struct ath10k_sdio {
struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
/* free list of bus requests */
struct list_head bus_req_freeq;
+
+ struct sk_buff_head rx_head;
+
/* protects access to bus_req_freeq */
spinlock_t lock;
@@ -196,6 +198,13 @@ struct ath10k_sdio {
struct ath10k *ar;
struct ath10k_sdio_irq_data irq_data;
+ /* temporary buffer for sdio reads.
+ * It is allocated at probe time and used to receive bundled packets;
+ * bundled-packet reads are never issued in parallel, so no locking
+ * is needed.
+ */
+ u8 *vsg_buffer;
+
/* temporary buffer for BMI requests */
u8 *bmi_buf;
@@ -206,6 +215,8 @@ struct ath10k_sdio {
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
+
+ struct work_struct async_work_rx;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 16177497bba7..21081b4a27d7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -46,7 +46,7 @@ static const char * const ath10k_regulators[] = {
};
static const char * const ath10k_clocks[] = {
- "cxo_ref_clk_pin",
+ "cxo_ref_clk_pin", "qdss",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -582,7 +582,7 @@ static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
- ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -1201,7 +1201,7 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
irqflags, ce_name[id], ar);
if (ret) {
ath10k_err(ar,
- "failed to register IRQ handler for CE %d: %d",
+ "failed to register IRQ handler for CE %d: %d\n",
id, ret);
goto err_irq;
}
@@ -1466,7 +1466,6 @@ MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
static int ath10k_snoc_probe(struct platform_device *pdev)
{
const struct ath10k_snoc_drv_priv *drv_data;
- const struct of_device_id *of_id;
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
@@ -1474,18 +1473,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
int ret;
u32 i;
- of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ dev = &pdev->dev;
+ drv_data = device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "failed to find matching device tree id\n");
return -EINVAL;
}
- drv_data = of_id->data;
- dev = &pdev->dev;
-
ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
if (ret) {
- dev_err(dev, "failed to set dma mask: %d", ret);
+ dev_err(dev, "failed to set dma mask: %d\n", ret);
return ret;
}
@@ -1563,13 +1560,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
- goto err_core_destroy;
+ goto err_power_off;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_power_off:
+ ath10k_hw_power_off(ar);
+
err_free_irq:
ath10k_snoc_free_irq(ar);
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 1bffe3fbea3f..7a9b9bbcdbfc 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -65,7 +65,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi event cmd attribute: %d\n",
+ "failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
@@ -74,7 +74,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi even cmd_id: %d\n",
+ "failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index ab916459d237..842e42ec814f 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
@@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 69a1ec53df29..4e68debda9bf 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
const struct wmi_tlv_mgmt_rx_ev *ev;
const u8 *frame;
u32 msdu_len;
- int ret;
+ int ret, i;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -865,6 +865,9 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
arg->phy_mode = ev->phy_mode;
arg->rate = ev->rate;
+ for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
+ arg->rssi[i] = ev->rssi[i];
+
msdu_len = __le32_to_cpu(arg->buf_len);
if (skb->len < (frame - skb->data) + msdu_len) {
@@ -3707,6 +3710,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
+ u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3764,10 +3768,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
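+ /* the TLV length covers only the nlo_list array that follows */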
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
- tlv->len = __cpu_to_le16(len);
+ tlv->len = __cpu_to_le16(tlv_len);
ptr += sizeof(*tlv);
nlo_list = ptr;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 9f564e2b7a14..61885d4d662c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2463,10 +2463,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rx_status;
u32 channel;
u32 phy_mode;
- u32 snr;
+ u32 snr, rssi;
u32 rate;
u16 fc;
- int ret;
+ int ret, i;
ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
if (ret) {
@@ -2525,6 +2525,20 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
+
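+ /* firmware reports per-chain rssi relative to the noise floor */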
+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+ status->chains &= ~BIT(i);
+ rssi = __le32_to_cpu(arg.rssi[i]);
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, rssi);
+
+ if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
+ status->chains |= BIT(i);
+ }
+ }
+
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -9476,7 +9490,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
- msdu->len, DMA_FROM_DEVICE);
+ msdu->len, DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 74adce1dd3a9..972d53d77654 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6786,6 +6786,7 @@ struct wmi_peer_delete_resp_ev_arg {
struct wmi_mac_addr peer_addr;
};
+#define WMI_MGMT_RX_NUM_RSSI 4
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6794,6 +6795,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
struct wmi_mgmt_rx_ext_info ext_info;
+ __le32 rssi[WMI_MGMT_RX_NUM_RSSI];
};
struct wmi_ch_info_ev_arg {
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
new file mode 100644
index 000000000000..c88e16d4022b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH11K
+ tristate "Qualcomm Technologies 802.11ax chipset support"
+ depends on MAC80211 && HAS_DMA
+ depends on REMOTEPROC
+ depends on ARCH_QCOM || COMPILE_TEST
+ select ATH_COMMON
+ select QCOM_QMI_HELPERS
+ ---help---
+ This module adds support for Qualcomm Technologies 802.11ax family of
+ chipsets.
+
+ If you choose to build a module, it'll be called ath11k.
+
+config ATH11K_DEBUG
+ bool "QCA ath11k debugging"
+ depends on ATH11K
+ ---help---
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_DEBUGFS
+ bool "QCA ath11k debugfs support"
+ depends on ATH11K && DEBUG_FS && MAC80211_DEBUGFS
+ ---help---
+ Enable ath11k debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_TRACING
+ bool "ath11k tracing support"
+ depends on ATH11K && EVENT_TRACING
+ ---help---
+ Select this to use ath11k tracing infrastructure.
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
new file mode 100644
index 000000000000..2761d07d938e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH11K) += ath11k.o
+ath11k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ ahb.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o
+
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
new file mode 100644
index 000000000000..e7e3e64c07aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "debug.h"
+#include <linux/remoteproc.h>
+
+static const struct of_device_id ath11k_ahb_of_match[] = {
+ /* TODO: Should we change the compatible string to something similar
+ * to one that ath10k uses?
+ */
+ { .compatible = "qcom,ipq8074-wifi",
+ .data = (void *)ATH11K_HW_IPQ8074,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(0),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+#define ATH11K_IRQ_CE0_OFFSET 4
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+#define ATH11K_TX_RING_MASK_0 0x1
+#define ATH11K_TX_RING_MASK_1 0x2
+#define ATH11K_TX_RING_MASK_2 0x4
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+};
+
+const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+};
+
+const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+};
+
+const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_ERR_RING_MASK_0,
+};
+
+const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+};
+
+const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+};
+
+const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+};
+
+const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+};
+
+/* enum ext_irq_num - irq numbers that can be used by external modules
+ * like datapath
+ */
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+
+ while ((skb = __skb_dequeue(&irq_grp->pending_q)))
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+int ath11k_ahb_start(struct ath11k_base *ab)
+{
+ ath11k_ahb_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_ahb_ext_irq_disable(ab);
+ ath11k_ahb_sync_ext_irqs(ab);
+}
+
+void ath11k_ahb_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_ahb_ce_irqs_disable(ab);
+ ath11k_ahb_sync_ce_irqs(ab);
+ ath11k_ahb_kill_tasklets(ab);
+ del_timer_sync(&ab->rx_replenish_retry);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+int ath11k_ahb_power_up(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = rproc_boot(ab->tgt_rproc);
+ if (ret)
+ ath11k_err(ab, "failed to boot the remote processor Q6\n");
+
+ return ret;
+}
+
+void ath11k_ahb_power_down(struct ath11k_base *ab)
+{
+ rproc_shutdown(ab->tgt_rproc);
+}
+
+static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan) - 1;
+ cfg->tgt_ce = target_ce_config_wlan;
+ cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan);
+ cfg->svc_to_ce_map = target_service_to_ce_map_wlan;
+}
+
+static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+ }
+}
+
+static void ath11k_ahb_free_irq(struct ath11k_base *ab)
+{
+ int irq_idx;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_ahb_free_ext_irq(ab);
+}
+
+static void ath11k_ahb_ce_tasklet(unsigned long data)
+{
+ struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+
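+ /* ath11k_dp_service_srng() may exceed the budget; clamp for NAPI */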
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq;
+ int ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ __skb_queue_head_init(&irq_grp->pending_q);
+
+ for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
+ if (ath11k_tx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ath11k_rx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ath11k_rx_err_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ath11k_rx_wbm_rel_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ath11k_reo_status_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (j < MAX_RADIOS) {
+ if (ath11k_rxdma2host_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_destination_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (ath11k_host2rxdma_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ host2rxdma_host_buf_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (rx_mon_status_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ ppdu_end_interrupts_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_status_ring_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ }
+ }
+ }
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request_irq for %d\n",
+ irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_ahb_config_irq(struct ath11k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+
+ tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet,
+ (unsigned long)ce_pipe);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath11k_ahb_ext_irq_config(ab);
+
+ return ret;
+}
+
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int ath11k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath11k_base *ab;
+ const struct of_device_id *of_id;
+ struct resource *mem_res;
+ void __iomem *mem;
+ int ret;
+
+ of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "failed to get IO memory resource\n");
+ return -ENXIO;
+ }
+
+ mem = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
+ return ret;
+ }
+
+ ab = ath11k_core_alloc(&pdev->dev);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->pdev = pdev;
+ ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+ platform_set_drvdata(pdev, ab);
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ return 0;
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_core_free:
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ reinit_completion(&ab->driver_recovery);
+
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
+ wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+
+ ath11k_core_deinit(ab);
+ ath11k_ahb_free_irq(ab);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath11k_ahb_driver = {
+ .driver = {
+ .name = "ath11k",
+ .of_match_table = ath11k_ahb_of_match,
+ },
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
+};
+
+int ath11k_ahb_init(void)
+{
+ return platform_driver_register(&ath11k_ahb_driver);
+}
+
+void ath11k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath11k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
new file mode 100644
index 000000000000..93f46dfe22df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_AHB_H
+#define ATH11K_AHB_H
+
+#include "core.h"
+
+#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+struct ath11k_base;
+
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
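+
+/* Usage sketch (illustrative): offsets are relative to ab->mem, so a
+ * read-modify-write of a CE interrupt-enable register could look like
+ * the following (CE_HOST_IE_ADDRESS comes from ce.h; the BIT(ce_id)
+ * bit layout is an assumption made for the example):
+ *
+ *	u32 val = ath11k_ahb_read32(ab, CE_HOST_IE_ADDRESS);
+ *	ath11k_ahb_write32(ab, CE_HOST_IE_ADDRESS, val | BIT(ce_id));
+ */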
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab);
+int ath11k_ahb_start(struct ath11k_base *ab);
+void ath11k_ahb_stop(struct ath11k_base *ab);
+int ath11k_ahb_power_up(struct ath11k_base *ab);
+void ath11k_ahb_power_down(struct ath11k_base *ab);
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+
+int ath11k_ahb_init(void);
+void ath11k_ahb_exit(void);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
new file mode 100644
index 000000000000..cdd40c8fc867
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+
+static const struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: host->target WMI (mac2) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE11: Not used */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ u32 *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath11k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath11k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ u32 *desc;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+ u32 *desc;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+
+ while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int ath11k_ce_init_ring(struct ath11k_base *ab,
+ struct ath11k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath11k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+ ce_ring->hal_ring_id = ret;
+
+ return 0;
+}
+
+static struct ath11k_ce_ring *
+ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
+{
+ struct ath11k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (ce_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that cannot do cache-coherent
+ * DMA are not supported
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space = PTR_ALIGN(
+ ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ ce_ring->base_addr_ce_space = ALIGN(
+ ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
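+
+/* Worked example (aside): CE_DESC_RING_ALIGN is 8, so if the coherent
+ * allocation came back at ...0x1004, PTR_ALIGN()/ALIGN() would advance
+ * both the host and CE views to ...0x1008; the extra CE_DESC_RING_ALIGN
+ * bytes in the allocation guarantee room for that shift. In practice
+ * dma_alloc_coherent() usually returns well-aligned memory already, so
+ * this is defensive.
+ */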
+
+static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
+ struct ath11k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = ath11k_ce_send_done_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+ if (pipe->send_cb)
+ pipe->send_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath11k_ce_recv_process_cb(pipe);
+}
+
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+ pipe->send_cb(pipe);
+}
+
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_srng *srng;
+ u32 *desc;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+ /* Check if some entries could be regained by handling tx completion if
+ * the CE has interrupts disabled and the used entries is more than the
+ * defined usage threshold.
+ */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH11K_CE_USAGE_THRESHOLD)
+ ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath11k_ce_rx_pipe_cleanup(pipe);
+
+ /* Cleanup any src CE's which have interrupts disabled */
+ ath11k_ce_poll_send_completed(ab, pipe_num);
+
+ /* NOTE: Should we also clean up tx buffer in all pipes? */
+ }
+}
+
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+
+void ath11k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath11k_ce_rx_post_buf(ab);
+}
+
+int ath11k_ce_init_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath11k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest status ing: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath11k_ce_free_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->src_ring->base_addr_owner_space,
+ pipe->src_ring->base_addr_ce_space);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->dest_ring->base_addr_owner_space,
+ pipe->dest_ring->base_addr_ce_space);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->status_ring->base_addr_owner_space,
+ pipe->status_ring->base_addr_ce_space);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ attr = &host_ce_config_wlan[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath11k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any partially successful allocations */
+ ath11k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* On a big-endian host the Copy Engine byte_swap is enabled.
+ * When the Copy Engine does the byte swap, the host needs to swap again
+ * to get/put the buffer content in the correct byte order.
+ */
+void ath11k_ce_byte_swap(void *mem, u32 len)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!mem)
+ return;
+
+ for (i = 0; i < (len / 4); i++) {
+ *(u32 *)mem = swab32(*(u32 *)mem);
+ mem += 4;
+ }
+ }
+}
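+
+/* Example (aside): swab32(0x11223344) == 0x44332211, so a 16-byte
+ * buffer is swapped as four independent 32-bit words; callers are
+ * expected to pass a length that is a multiple of 4.
+ */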
+
+int ath11k_ce_get_attr_flags(int ce_id)
+{
+ if (ce_id >= CE_COUNT)
+ return -EINVAL;
+
+ return host_ce_config_wlan[ce_id].flags;
+}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
new file mode 100644
index 000000000000..e355dfda48bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CE_H
+#define ATH11K_CE_H
+
+#define CE_COUNT 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH11K_CE_USAGE_THRESHOLD 32
+
+void ath11k_ce_byte_swap(void *mem, u32 len);
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
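+
+/* Example (aside): per the host_ce_config_wlan table in ce.c, the WMI
+ * control service for mac0 maps to PIPEDIR_OUT on CE3 (host->target
+ * WMI) and PIPEDIR_IN on CE2 (target->host WMI).
+ */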
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
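+
+/* Example (aside): ring sizes are rounded up to a power of two (see
+ * ath11k_ce_alloc_pipe()) so the mask wraps the index, e.g. with
+ * nentries == 512, nentries_mask == 511 and
+ * CE_RING_IDX_INCR(511, 511) == 0.
+ */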
+
+#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath11k_base;
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
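+
+/* Initialization sketch (illustrative; the service id constant is
+ * assumed to come from htc.h):
+ *
+ *	static const struct service_to_pipe entry = {
+ *		.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ *		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ *		.pipenum = __cpu_to_le32(3),
+ *	};
+ */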
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath11k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /* For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
+
+struct ath11k_ce_pipe {
+ struct ath11k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath11k_ce_pipe *);
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+
+ struct tasklet_struct intr_tq;
+ struct ath11k_ce_ring *src_ring;
+ struct ath11k_ce_ring *dest_ring;
+ struct ath11k_ce_ring *status_ring;
+};
+
+struct ath11k_ce {
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+};
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
+void ath11k_ce_rx_replenish_retry(struct timer_list *t);
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
+int ath11k_ce_init_pipes(struct ath11k_base *ab);
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
+void ath11k_ce_free_pipes(struct ath11k_base *ab);
+int ath11k_ce_get_attr_flags(int ce_id);
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
new file mode 100644
index 000000000000..9e823056e673
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include "ahb.h"
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+
+unsigned int ath11k_debug_mask;
+module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static const struct ath11k_hw_params ath11k_hw_params = {
+ .name = "ipq8074",
+ .fw = {
+ .dir = IPQ8074_FW_DIR,
+ .board_size = IPQ8074_MAX_BOARD_DATA_SZ,
+ .cal_size = IPQ8074_MAX_CAL_DATA_SZ,
+ },
+};
+
+/* Map from pdev index to hw mac index */
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ ath11k_warn(ab, "Invalid pdev idx %d\n", pdev_idx);
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ /* Note: the bus is fixed to AHB for now. When other bus
+ * types are supported, make this dynamic.
+ */
+ scnprintf(name, name_len,
+ "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
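+
+/* Example (aside): a target reporting chip id 0 and board id 255 yields
+ * "bus=ahb,qmi-chip-id=0,qmi-board-id=255", which is later matched
+ * against ATH11K_BD_IE_BOARD_NAME entries in board-2.bin.
+ */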
+
+static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = firmware_request_nowarn(&fw, filename, ab->dev);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret)
+ return ERR_PTR(ret);
+ ath11k_warn(ab, "Downloading BDF: %s, size: %zu\n",
+ filename, fw->size);
+
+ return fw;
+}
+
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath11k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH11K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath11k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH11K_BD_IE_BOARD_NAME:
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH11K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found board data for '%s'", boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *boardname)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename = ATH11K_BOARD_API2_FILE;
+ size_t ie_len;
+ struct ath11k_fw_ie *hdr;
+ int ret, ie_id;
+
+ if (!bd->fw)
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
+ ath11k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH11K_BD_IE_BOARD:
+ ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ATH11K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath11k_err(ab,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ab->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath11k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd)
+{
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ ATH11K_DEFAULT_BOARD_FILE);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 100
+int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ ab->bd_api = 2;
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd);
+ if (ret) {
+ ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ab->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+static void ath11k_core_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_qmi_firmware_stop(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath11k_core_soc_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_init_service(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize qmi :%d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_debug_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create ath11k debugfs\n");
+ goto err_qmi_deinit;
+ }
+
+ ret = ath11k_ahb_power_up(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to power up :%d\n", ret);
+ goto err_debugfs_reg;
+ }
+
+ return 0;
+
+err_debugfs_reg:
+ ath11k_debug_soc_destroy(ab);
+err_qmi_deinit:
+ ath11k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath11k_core_soc_destroy(struct ath11k_base *ab)
+{
+ ath11k_debug_soc_destroy(ab);
+ ath11k_dp_free(ab);
+ ath11k_reg_free(ab);
+ ath11k_qmi_deinit_service(ab);
+}
+
+static int ath11k_core_pdev_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_debug_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_mac_unregister;
+ }
+
+ return 0;
+
+err_mac_unregister:
+ ath11k_mac_unregister(ab);
+
+err_pdev_debug:
+ ath11k_debug_pdev_destroy(ab);
+
+ return ret;
+}
+
+static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
+{
+ ath11k_mac_unregister(ab);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_debug_pdev_destroy(ab);
+}
+
+static int ath11k_core_start(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_attach(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath11k_htc_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_ahb_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_connect(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_htc_start(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_mac_allocate(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath11k_dp_pdev_pre_alloc(ab);
+
+ ret = ath11k_dp_pdev_reo_setup(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath11k_wmi_cmd_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath11k_mac_destroy(ab);
+err_hif_stop:
+ ath11k_ahb_stop(ab);
+err_wmi_detach:
+ ath11k_wmi_detach(ab);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_ce_init_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize CE: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init DP: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ab->core_lock);
+ ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath11k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath11k_core_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath11k_ahb_ext_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath11k_core_stop(ab);
+ ath11k_mac_destroy(ab);
+err_dp_free:
+ ath11k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+ return ret;
+}
+
+static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+ ath11k_hal_srng_deinit(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath11k_core_halt(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+
+ ath11k_mac_scan_finish(ar);
+ ath11k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret = 0;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath11k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ath11k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH11K_STATE_OFF:
+ ath11k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH11K_STATE_RESTARTING:
+ break;
+ case ATH11K_STATE_RESTARTED:
+ ar->state = ATH11K_STATE_WEDGED;
+ /* fall through */
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_err(ab, "failed to get rproc\n");
+ return -EINVAL;
+ }
+ ab->tgt_rproc = prproc;
+ ab->hw_params = ath11k_hw_params;
+
+ ret = ath11k_core_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_core_deinit(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath11k_core_pdev_destroy(ab);
+ ath11k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_ahb_power_down(ab);
+ ath11k_mac_destroy(ab);
+ ath11k_core_soc_destroy(ab);
+}
+
+void ath11k_core_free(struct ath11k_base *ab)
+{
+ kfree(ab);
+}
+
+struct ath11k_base *ath11k_core_alloc(struct device *dev)
+{
+ struct ath11k_base *ab;
+
+ ab = kzalloc(sizeof(*ab), GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath11k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ mutex_init(&ab->core_lock);
+ spin_lock_init(&ab->base_lock);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
+ ab->dev = dev;
+
+ return ab;
+
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+
+static int __init ath11k_init(void)
+{
+ int ret;
+
+ ret = ath11k_ahb_init();
+ if (ret)
+ pr_err("failed to register ath11k ahb driver: %d\n", ret);
+ return ret;
+}
+module_init(ath11k_init);
+
+static void __exit ath11k_exit(void)
+{
+ ath11k_ahb_exit();
+}
+module_exit(ath11k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
new file mode 100644
index 000000000000..25cdcf71d0c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -0,0 +1,826 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CORE_H
+#define ATH11K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
+#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
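+/* i.e. (64 * 3) / 4 == 48 pending frames with the target limit above */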
+
+#define ATH11K_INVALID_HW_MAC_ID 0xFF
+
+enum ath11k_supported_bw {
+ ATH11K_BW_20 = 0,
+ ATH11K_BW_40 = 1,
+ ATH11K_BW_80 = 2,
+ ATH11K_BW_160 = 3,
+};
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH11K_HT_MCS_MAX 7
+#define ATH11K_VHT_MCS_MAX 9
+#define ATH11K_HE_MCS_MAX 11
+
+static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
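+
+/* Mapping summary (aside): TIDs 0/3 -> BE, 1/2 -> BK, 4/5 -> VI and all
+ * remaining TIDs (6, 7 and any larger value) -> VO, matching the usual
+ * 802.11 UP-to-AC mapping.
+ */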
+
+struct ath11k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+} __packed;
+
+struct ath11k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+};
+
+enum ath11k_hw_rev {
+ ATH11K_HW_IPQ8074,
+};
+
+enum ath11k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH11K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH11K_FIRMWARE_MODE_FTM,
+};
+
+#define ATH11K_IRQ_NUM_MAX 52
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+#define ATH11K_EXT_IRQ_NUM_MAX 16
+
+extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+
+struct ath11k_ext_irq_grp {
+ struct ath11k_base *ab;
+ u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+ /* Queue of pending packets; left unlocked to avoid locking
+ * overhead since it is not expected to be accessed concurrently.
+ */
+ struct sk_buff_head pending_q;
+};
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath11k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath11k_scan_state {
+ ATH11K_SCAN_IDLE,
+ ATH11K_SCAN_STARTING,
+ ATH11K_SCAN_RUNNING,
+ ATH11K_SCAN_ABORTING,
+};
+
+enum ath11k_dev_flags {
+ ATH11K_CAC_RUNNING,
+ ATH11K_FLAG_CORE_REGISTERED,
+ ATH11K_FLAG_CRASH_FLUSH,
+ ATH11K_FLAG_RAW_MODE,
+ ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ ATH11K_FLAG_BTCOEX,
+ ATH11K_FLAG_RECOVERY,
+ ATH11K_FLAG_UNREGISTERING,
+ ATH11K_FLAG_REGISTERED,
+};
+
+enum ath11k_monitor_flags {
+ ATH11K_FLAG_MONITOR_ENABLED,
+};
+
+struct ath11k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+
+ u16 tx_seq_no;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+};
+
+struct ath11k_vif_iter {
+ u32 vdev_id;
+ struct ath11k_vif *arvif;
+};
+
+struct ath11k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+};
+
+#define ATH11K_HE_MCS_NUM 12
+#define ATH11K_VHT_MCS_NUM 10
+#define ATH11K_BW_NUM 4
+#define ATH11K_NSS_NUM 4
+#define ATH11K_LEGACY_NUM 12
+#define ATH11K_GI_NUM 4
+#define ATH11K_HT_MCS_NUM 32
+
+enum ath11k_pkt_rx_err {
+ ATH11K_PKT_RX_ERR_FCS,
+ ATH11K_PKT_RX_ERR_TKIP,
+ ATH11K_PKT_RX_ERR_CRYPT,
+ ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH11K_PKT_RX_ERR_MAX,
+};
+
+enum ath11k_ampdu_subfrm_num {
+ ATH11K_AMPDU_SUBFRM_NUM_10,
+ ATH11K_AMPDU_SUBFRM_NUM_20,
+ ATH11K_AMPDU_SUBFRM_NUM_30,
+ ATH11K_AMPDU_SUBFRM_NUM_40,
+ ATH11K_AMPDU_SUBFRM_NUM_50,
+ ATH11K_AMPDU_SUBFRM_NUM_60,
+ ATH11K_AMPDU_SUBFRM_NUM_MORE,
+ ATH11K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_amsdu_subfrm_num {
+ ATH11K_AMSDU_SUBFRM_NUM_1,
+ ATH11K_AMSDU_SUBFRM_NUM_2,
+ ATH11K_AMSDU_SUBFRM_NUM_3,
+ ATH11K_AMSDU_SUBFRM_NUM_4,
+ ATH11K_AMSDU_SUBFRM_NUM_MORE,
+ ATH11K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_counter_type {
+ ATH11K_COUNTER_TYPE_BYTES,
+ ATH11K_COUNTER_TYPE_PKTS,
+ ATH11K_COUNTER_TYPE_MAX,
+};
+
+enum ath11k_stats_type {
+ ATH11K_STATS_TYPE_SUCC,
+ ATH11K_STATS_TYPE_FAIL,
+ ATH11K_STATS_TYPE_RETRY,
+ ATH11K_STATS_TYPE_AMPDU,
+ ATH11K_STATS_TYPE_MAX,
+};
+
+struct ath11k_htt_data_stats {
+ u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
+ u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
+ u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
+ u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
+ u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
+ u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
+ u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
+};
+
+struct ath11k_htt_tx_stats {
+ struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+struct ath11k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+struct ath11k_sta {
+ struct ath11k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+ struct ieee80211_tx_info tx_info;
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ath11k_htt_tx_stats *tx_stats;
+ struct ath11k_rx_peer_stats *rx_stats;
+};
+
+#define ATH11K_NUM_CHANS 41
+#define ATH11K_MAX_5G_CHAN 173
+
+enum ath11k_state {
+ ATH11K_STATE_OFF,
+ ATH11K_STATE_ON,
+ ATH11K_STATE_RESTARTING,
+ ATH11K_STATE_RESTARTED,
+ ATH11K_STATE_WEDGED,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH11K_DEFAULT_NOISE_FLOOR -95
+
+struct ath11k_fw_stats {
+ struct dentry *debugfs_fwstats;
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath11k_dbg_htt_stats {
+ u8 type;
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+ /* protects shared stats req buffer */
+ spinlock_t lock;
+};
+
+struct ath11k_debug {
+ struct dentry *debugfs_pdev;
+ struct ath11k_dbg_htt_stats htt_stats;
+ u32 extd_tx_stats;
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+ u32 extd_rx_stats;
+ u32 pktlog_filter;
+ u32 pktlog_mode;
+ u32 pktlog_peer_valid;
+ u8 pktlog_peer_addr[ETH_ALEN];
+};
+
+struct ath11k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u32 duration;
+ u8 ba_fails;
+ bool is_ampdu;
+};
+
+#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
+
+struct ath11k_vdev_stop_status {
+ bool stop_in_progress;
+ u32 vdev_id;
+};
+
+struct ath11k {
+ struct ath11k_base *ab;
+ struct ath11k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath11k_pdev_wmi *wmi;
+ struct ath11k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ struct ath11k_he ar_he;
+ enum ath11k_state state;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath11k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ bool monitor_present;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct ath11k_vdev_stop_status vdev_stop_status;
+ struct completion vdev_setup_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH11K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath11k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath11k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct ath11k_debug debug;
+#endif
+ bool dfs_block_radar_events;
+};
+
+struct ath11k_band_cap {
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath11k_ppe_threshold he_ppet;
+};
+
+struct ath11k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath11k_band_cap band[NUM_NL80211_BANDS];
+};
+
+struct ath11k_pdev {
+ struct ath11k *ar;
+ u32 pdev_id;
+ struct ath11k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+struct ath11k_soc_dp_rx_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath11k_base {
+ enum ath11k_hw_rev hw_rev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath11k_qmi qmi;
+ struct ath11k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ struct rproc *tgt_rproc;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath11k_htc htc;
+
+ struct ath11k_dp dp;
+
+ void __iomem *mem;
+ unsigned long mem_len;
+
+ const struct ath11k_hif_ops *hif_ops;
+
+ struct ath11k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath11k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH11K_IRQ_NUM_MAX];
+ struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ struct napi_struct *napi;
+ struct ath11k_targ_cap target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+ struct ath11k_hw_params hw_params;
+ const struct firmware *cal_file;
+
+ /* The regds below are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting.
+ * It may or may not be used during runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath11k_dfs_region dfs_region;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_soc;
+ struct dentry *debugfs_ath11k;
+#endif
+ struct ath11k_soc_dp_rx_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+};
+
+struct ath11k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ /* Cycles spent transmitting frames */
+ u32 tx_frame_count;
+ /* Cycles spent receiving frames */
+ u32 rx_frame_count;
+ /* Total channel busy time, evidently */
+ u32 rx_clear_count;
+ /* Total on-channel time */
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+ /* Num MSDUs queued to WAL */
+ s32 msdu_enqued;
+ /* Num MPDUs queued to WAL */
+ s32 mpdu_enqued;
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+ /* Num Local frames queued */
+ s32 local_enqued;
+ /* Num Local frames done */
+ s32 local_freed;
+ /* Num queued to HW */
+ s32 hw_queued;
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+ /* Num underruns */
+ s32 underrun;
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requed;
+ /* excessive retries */
+ u32 tx_ko;
+ /* data hw rate code */
+ u32 data_rc;
+ /* Scheduler self triggers */
+ u32 self_triggers;
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+ /* wal pdev resets */
+ u32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ /* Counts any change in ring routing mid-ppdu */
+ s32 mid_ppdu_route_change;
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+ /* Number of PHY errors */
+ s32 phy_errs;
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+ /* Number of MPDU errors - FCS, MIC, ENC, etc. */
+ s32 mpdu_errs;
+};
+
+struct ath11k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath11k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_init(struct ath11k_base *ath11k);
+void ath11k_core_deinit(struct ath11k_base *ath11k);
+struct ath11k_base *ath11k_core_alloc(struct device *dev);
+void ath11k_core_free(struct ath11k_base *ath11k);
+int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
+ struct ath11k_board_data *bd);
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
+
+void ath11k_core_halt(struct ath11k *ar);
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
+
+static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
+{
+ switch (state) {
+ case ATH11K_SCAN_IDLE:
+ return "idle";
+ case ATH11K_SCAN_STARTING:
+ return "starting";
+ case ATH11K_SCAN_RUNNING:
+ return "running";
+ case ATH11K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
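+/* TX metadata is carried in the mac80211 driver_data area of skb->cb,
+ * while the RX control block below reuses the whole cb[] (size-checked
+ * by the BUILD_BUG_ON in ATH11K_SKB_RXCB()).
+ */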
+static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
+{
+ return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath11k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath11k_vif *)vif->drv_priv;
+}
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
new file mode 100644
index 000000000000..8d485171b0b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debug_htt_stats.h"
+#include "peer.h"
+
+void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+#ifdef CONFIG_ATH11K_DEBUG
+void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath11k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+
+ /* TODO: trace log */
+
+ va_end(args);
+}
+
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath11k_debug_mask & mask) {
+ if (msg)
+ __ath11k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
+ }
+ }
+}
+
+#endif
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_debug_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
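+/* Called from the WMI event path: parse one (possibly split) firmware
+ * stats event and splice the results onto the owning pdev's debug lists.
+ */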
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += ar->num_started_vdevs;
+ }
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->debug.fw_stats.vdevs);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats.bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ goto complete;
+ }
+ /* Mark end until we reached the count of all started VDEVs
+ * within the PDEV
+ */
+ is_end = ((++num_bcn) == ar->num_started_vdevs ? true : false);
+
+ list_splice_tail_init(&stats.bcn,
+ &ar->debug.fw_stats.bcn);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+complete:
+ complete(&ar->debug.fw_stats_complete);
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+free:
+ ath11k_fw_stats_pdevs_free(&stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&stats.vdevs);
+ ath11k_fw_stats_bcn_free(&stats.bcn);
+}
+
+static int ath11k_debug_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marker for the back-to-back
+ * received 'update stats' events, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
+
+ ath11k_debug_fw_stats_reset(ar);
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
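+ /* Poll fw_stats_done under data_lock until all split events have
+ * arrived or the 3 second deadline above expires.
+ */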
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
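+/* The debugfs open handlers below request a fresh stats snapshot from
+ * firmware and format it into a vmalloc'd buffer which read() then
+ * serves via simple_read_from_buffer().
+ */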
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats is always sent for all active VDEVs from FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ /* since beacon stats request is looped for all active VDEVs, saved fw
+ * stats is not freed for each request until done for all active VDEVs
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to cause an
+ * assert; this firmware hang is recoverable by a warm firmware reset.
+ * 'hw-restart' is advertised in the help text above but is not handled
+ * here yet.
+ */
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+ .open = simple_open,
+};
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_rx_stats = {
+ .read = ath11k_debug_dump_soc_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ if (IS_ERR(ab->debugfs_soc))
+ return PTR_ERR(ab->debugfs_soc);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_rx_stats);
+
+ return 0;
+}
+
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_ath11k);
+ ab->debugfs_ath11k = NULL;
+}
+
+int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
+ if (IS_ERR(ab->debugfs_ath11k))
+ return PTR_ERR(ab->debugfs_ath11k);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* all stats debugfs files created are under "fw_stats" directory
+ * created per PDEV
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
+
+ init_completion(&ar->debug.fw_stats_complete);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {0};
+ int ret;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
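+ /* expected input format: "0x<filter> <mode>", where mode is
+ * 1 (lite) or 2 (full); see enum ath11k_pktlog_mode
+ */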
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
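+ /* full mode adds MSDU/MPDU end, packet header and attention TLVs
+ * on top of the lite mode filter set
+ */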
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ } else {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
+ filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
+ ar->debug.pktlog_filter,
+ ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open,
+};
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open,
+};
+
+int ath11k_debug_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ return -ENOMEM;
+ }
+
+ /* Create a symlink under ieee80211/phy* */
+ snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debug_htt_stats_init(ar);
+
+ ath11k_debug_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+
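+ /* DFS controls are only meaningful when a 5 GHz band is present */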
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ return 0;
+}
+
+void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
new file mode 100644
index 000000000000..8e8d5588b541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUG_H_
+#define _ATH11K_DEBUG_H_
+
+#include "hal_tx.h"
+#include "trace.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+enum ath11k_debug_mask {
+ ATH11K_DBG_AHB = 0x00000001,
+ ATH11K_DBG_WMI = 0x00000002,
+ ATH11K_DBG_HTC = 0x00000004,
+ ATH11K_DBG_DP_HTT = 0x00000008,
+ ATH11K_DBG_MAC = 0x00000010,
+ ATH11K_DBG_BOOT = 0x00000020,
+ ATH11K_DBG_QMI = 0x00000040,
+ ATH11K_DBG_DATA = 0x00000080,
+ ATH11K_DBG_MGMT = 0x00000100,
+ ATH11K_DBG_REG = 0x00000200,
+ ATH11K_DBG_TESTMODE = 0x00000400,
+ ATH11k_DBG_HAL = 0x00000800,
+ ATH11K_DBG_ANY = 0xffffffff,
+};
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath11k_debug_mask;
+
+#ifdef CONFIG_ATH11K_DEBUG
+__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *fmt, ...);
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH11K_DEBUG */
+static inline int __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUG */
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debug_soc_create(struct ath11k_base *ab);
+void ath11k_debug_soc_destroy(struct ath11k_base *ab);
+int ath11k_debug_pdev_create(struct ath11k_base *ab);
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debug_register(struct ath11k *ar);
+void ath11k_debug_unregister(struct ath11k *ar);
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar);
+int ath11k_dbg_htt_stats_req(struct ath11k *ar);
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts);
+#else
+static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void
+ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
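+/* Check the debug mask before calling __ath11k_dbg() so that disabled
+ * messages cost only a mask test at the call site.
+ */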
+#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if (ath11k_debug_mask & dbg_mask) \
+ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _ATH11K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
new file mode 100644
index 000000000000..9939e909628f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -0,0 +1,4570 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debug_htt_stats.h"
+
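+/* scnprintf() wrapper which appends a trailing newline to each stats line */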
+#define HTT_DBG_OUT(buf, len, fmt, ...) \
+ scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
+
+#define HTT_MAX_STRING_LEN 256
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+
+#define HTT_TLV_HDR_LEN 4
+
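+/* Render an array as " index:value," pairs, stopping once the
+ * HTT_MAX_STRING_LEN buffer is full.
+ */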
+#define ARRAY_TO_STRING(out, arr, len) \
+ do { \
+ int index = 0; u8 i; \
+ for (i = 0; i < len; i++) { \
+ index += snprintf(out + index, HTT_MAX_STRING_LEN - index, \
+ " %u:%u,", i, arr[i]); \
+ if (index < 0 || index >= HTT_MAX_STRING_LEN) \
+ break; \
+ } \
+ } while (0)
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ u16 index = 0;
+ char data[HTT_MAX_STRING_LEN] = {0};
+
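+ /* tag_len is in bytes; data[] is consumed as 32-bit words, four
+ * characters at a time
+ */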
+ tag_len = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+
+ for (i = 0; i < tag_len; i++) {
+ index += snprintf(&data[index],
+ HTT_MAX_STRING_LEN - index,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
+ if (index >= HTT_MAX_STRING_LEN)
+ break;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
+ htt_stats_buf->underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
+ htt_stats_buf->hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
+ htt_stats_buf->hw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
+ htt_stats_buf->hw_filt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
+ htt_stats_buf->mpdu_requed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
+ htt_stats_buf->tx_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
+ htt_stats_buf->data_rc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
+ htt_stats_buf->cont_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
+ htt_stats_buf->tx_timeout);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
+ htt_stats_buf->pdev_resets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
+ htt_stats_buf->phy_underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
+ htt_stats_buf->txop_ovf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
+ htt_stats_buf->seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
+ htt_stats_buf->seq_failed_queueing);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
+ htt_stats_buf->seq_completed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
+ htt_stats_buf->seq_restarted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
+ htt_stats_buf->mu_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
+ htt_stats_buf->seq_posted_isr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
+ htt_stats_buf->seq_ctrl_cached);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
+ htt_stats_buf->mpdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
+ htt_stats_buf->msdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
+ htt_stats_buf->msdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
+ htt_stats_buf->mpdus_sw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
+ htt_stats_buf->mpdus_truncated);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
+ htt_stats_buf->mpdus_ack_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
+ htt_stats_buf->mpdus_expired);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
+ htt_stats_buf->local_data_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
+ htt_stats_buf->local_data_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
+ htt_stats_buf->mpdu_tried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ htt_stats_buf->tx_active_dur_us_high);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char urrn_stats[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+
+ ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char flush_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+
+ ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
+ sifs_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
+ sifs_hist_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
+ htt_stats_buf->num_data_ppdus_legacy_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
+ htt_stats_buf->num_data_ppdus_ac_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
+ htt_stats_buf->num_data_ppdus_ax_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER\n");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+ HTT_STATS_MAX_HW_INTR_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
+ htt_stats_buf->mask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+ HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
+ hw_module_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
+ htt_stats_buf->tx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
+ htt_stats_buf->rx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
+ htt_stats_buf->rx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
+ htt_stats_buf->warm_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
+ htt_stats_buf->cold_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
+ htt_stats_buf->tx_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
+ htt_stats_buf->tx_glb_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
+ htt_stats_buf->tx_txq_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
+ htt_stats_buf->rx_timeout_reset);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
+ htt_stats_buf->last_update_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
+ htt_stats_buf->last_add_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
+ htt_stats_buf->last_remove_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
+ htt_stats_buf->total_processed_msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
+ 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
+ 20);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
+ htt_stats_buf->last_cycle_drop_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
+ htt_stats_buf->current_drop_th);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
+ htt_stats_buf->max_qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
+ htt_stats_buf->rsvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
+ htt_stats_buf->block_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
+ htt_stats_buf->allow_n_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
+ htt_stats_buf->sendn_frms_allowed);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
+ htt_stats_buf->dup_in_reorder);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
+ htt_stats_buf->dup_past_outside_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
+ htt_stats_buf->dup_past_within_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
+ htt_stats_buf->rxdesc_err_decrypt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_counter_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char counter_name[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+
+ ARRAY_TO_STRING(counter_name,
+ htt_stats_buf->counter_name,
+ HTT_MAX_COUNTER_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
+ htt_stats_buf->ppdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
+ htt_stats_buf->mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
+ htt_stats_buf->msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
+ htt_stats_buf->pause_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
+ htt_stats_buf->block_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
+ htt_stats_buf->rssi);
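+ /* 64-bit counters are reported as separate low/high 32-bit words. */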
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
+ htt_stats_buf->inactive_time);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
+ htt_stats_buf->peer_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
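+ /* MAC address is packed LSB-first into a 32-bit low and a 16-bit high word. */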
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
+ htt_stats_buf->peer_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
+ htt_stats_buf->qpeer_flags);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ u8 j;
+
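+ /* Scratch strings for the per-GI arrays; kmalloc'ed to keep stack use down. */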
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j],
+ htt_stats_buf->tx_gi[j],
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
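+ /* Reached on both success and allocation failure; kfree(NULL) is a no-op. */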
+fail:
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
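+ /* Success path also falls through here; kfree(NULL) is a no-op. */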
+fail:
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(rx_gi[j]);
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ /* TODO: HKDBG */
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
+ htt_stats_buf->xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
+ htt_stats_buf->underrun_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
+ htt_stats_buf->flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
+ htt_stats_buf->filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
+ htt_stats_buf->null_mpdu_bmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
+ htt_stats_buf->user_ack_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
+ htt_stats_buf->sched_id_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
+ htt_stats_buf->num_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
+ htt_stats_buf->txq_timeout);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
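+ /* tag_len is in bytes; each latency bin is a u32, clamped to the bin count. */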
+ u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+ char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
+ htt_stats_buf->hist_intvl);
+
+ ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
+ data_len);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
+ difs_latency_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len;
+ char cmd_result[HTT_MAX_STRING_LEN] = {0};
+
+ data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
+ cmd_stall_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char fes_result[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len -
+ sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(txop_used_cnt_hist,
+ htt_stats_buf->txop_used_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
+ txop_used_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ s32 i;
+ const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ const u32 *cbf_20 = htt_stats_buf->cbf_20;
+ const u32 *cbf_40 = htt_stats_buf->cbf_40;
+ const u32 *cbf_80 = htt_stats_buf->cbf_80;
+ const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
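+ /* cbf_* arrays are indexed by steering type: implicit TXBF, explicit SU
+ * (SIFS/RBO) and explicit MU (SIFS/RBO).
+ */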
+ if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
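+ /* sounding[] is assumed to hold four per-BW counters per user. */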
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+ i,
+ htt_stats_buf->sounding[i * 4],
+ htt_stats_buf->sounding[i * 4 + 1],
+ htt_stats_buf->sounding[i * 4 + 2],
+ htt_stats_buf->sounding[i * 4 + 3]);
+ }
+ } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
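+ /* Same assumed per-user layout of sounding[] as in the 11ac branch. */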
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+ i,
+ htt_stats_buf->sounding[i * 4],
+ htt_stats_buf->sounding[i * 4 + 1],
+ htt_stats_buf->sounding[i * 4 + 2],
+ htt_stats_buf->sounding[i * 4 + 3]);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
+ htt_stats_buf->su_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
+ htt_stats_buf->delayed_bar_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
+ htt_stats_buf->delayed_bar_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
+ htt_stats_buf->delayed_bar_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
+ htt_stats_buf->delayed_bar_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
+ htt_stats_buf->delayed_bar_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
+ htt_stats_buf->delayed_bar_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
+ htt_stats_buf->delayed_bar_7);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
+ htt_stats_buf->ac_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
+ htt_stats_buf->ac_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
+ htt_stats_buf->ax_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
+ htt_stats_buf->ax_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
+ htt_stats_buf->ax_basic_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
+ htt_stats_buf->ax_bsr_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
+ htt_stats_buf->ac_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
+ htt_stats_buf->ax_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
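+ /* Each TLV carries a single user's stats; print the banner only for user 0. */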
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
+ sched_cmd_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
+ sched_cmd_reaped);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_order_su[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_order_su_num_entries =
+ min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+
+ ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
+ sched_order_su);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:");
+
+ ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
+ sched_ineligibility);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__txq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
+ (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
+ htt_stats_buf->sched_policy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
+ htt_stats_buf->num_active_tids);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
+ htt_stats_buf->num_ps_schedules);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
+ htt_stats_buf->sched_cmds_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
+ htt_stats_buf->num_tid_register);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
+ htt_stats_buf->num_tid_unregister);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
+ htt_stats_buf->num_qstats_queried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
+ htt_stats_buf->qstats_update_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u\n",
+ htt_stats_buf->notify_sched);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ htt_stats_buf->dur_based_sendn_term);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
+ htt_stats_buf->current_timestamp);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
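+ /* tag_len is in bytes and each counter is a u32, so tag_len >> 2 is
+ * the element count; min_t() clamps it so a short or oversized TLV
+ * cannot read past the source array.
+ */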
+ u16 num_elements = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
+ gen_mpdu_end_reason);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
+ list_mpdu_end_reason);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
+ list_mpdu_cnt_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
+ htt_stats_buf->msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
+ htt_stats_buf->mpdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
+ htt_stats_buf->remove_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
+ htt_stats_buf->remove_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
+ htt_stats_buf->remove_msdu_ttl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
+ htt_stats_buf->send_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
+ htt_stats_buf->bar_sync);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
+ htt_stats_buf->notify_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
+ htt_stats_buf->hwsch_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
+ htt_stats_buf->gen_list_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
+ htt_stats_buf->enqueue);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
+ htt_stats_buf->enqueue_notify);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
+ htt_stats_buf->sched_udp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
+ htt_stats_buf->sched_udp_notify2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
+ htt_stats_buf->sched_nonudp_notify2);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
+ htt_stats_buf->max_cmdq_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
+ htt_stats_buf->add_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
+ htt_stats_buf->q_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
+ htt_stats_buf->q_not_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
+ htt_stats_buf->drop_notification);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
+ htt_stats_buf->desc_threshold);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
+ htt_stats_buf->q_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
+ htt_stats_buf->q_not_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
+ htt_stats_buf->add_msdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u",
+ (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
+ htt_stats_buf->update_msduq_cmd);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
+ htt_stats_buf->m1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
+ htt_stats_buf->m2_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
+ htt_stats_buf->m3_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
+ htt_stats_buf->m4_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
+ htt_stats_buf->g1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
+ htt_stats_buf->g2_packets);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
+ htt_stats_buf->sta_delete_in_progress);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
+ htt_stats_buf->invalid_vdev_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
+ htt_stats_buf->peer_entry_invalid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
+ htt_stats_buf->ethertype_not_ip);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
+ htt_stats_buf->eapol_lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
+ htt_stats_buf->fse_tid_override);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
+ htt_stats_buf->arp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
+ htt_stats_buf->igmp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
+ htt_stats_buf->dhcp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
+ htt_stats_buf->host_inspected);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
+ htt_stats_buf->htt_included);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
+ htt_stats_buf->htt_valid_mcs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
+ htt_stats_buf->htt_valid_nss);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
+ htt_stats_buf->htt_valid_chainmask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
+ htt_stats_buf->htt_valid_retries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
+ htt_stats_buf->htt_valid_bw_info);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
+ htt_stats_buf->htt_valid_power);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
+ htt_stats_buf->htt_valid_key_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
+ htt_stats_buf->fse_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
+ htt_stats_buf->fse_priority_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
+ htt_stats_buf->fse_priority_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
+ htt_stats_buf->fse_priority_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
+ htt_stats_buf->fse_hwqueue_created);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
+ htt_stats_buf->mcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
+ htt_stats_buf->bcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
+ htt_stats_buf->htt_update_peer_cache);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
+ htt_stats_buf->htt_learning_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
+ htt_stats_buf->fse_invalid_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
+ htt_stats_buf->mec_notify);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
+ htt_stats_buf->eok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
+ htt_stats_buf->classify_done);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
+ htt_stats_buf->lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
+ htt_stats_buf->send_host_dhcp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
+ htt_stats_buf->send_host_mcast);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
+ htt_stats_buf->send_host_unknown_dest);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
+ htt_stats_buf->send_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
+ htt_stats_buf->status_invalid);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
+ htt_stats_buf->enqueued_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
+ htt_stats_buf->to_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
+ htt_stats_buf->to_tqm_bypass);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
+ htt_stats_buf->discarded_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
+ htt_stats_buf->local_frames);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
+ htt_stats_buf->is_ext_msdu);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
+ htt_stats_buf->tcl_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
+ htt_stats_buf->tqm_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
+ htt_stats_buf->tqm_notify_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
+ htt_stats_buf->fw2wbm_enq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
+ htt_stats_buf->tqm_bypass_frame);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
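+ /* This histogram has no fixed element cap, so the worst-case
+ * formatted length is computed up front and the array is only
+ * stringified if it fits within HTT_MAX_STRING_LEN.
+ */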
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(fw2wbm_ring_full_hist,
+ htt_stats_buf->fw2wbm_ring_full_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "fw2wbm_ring_full_hist = %s\n",
+ fw2wbm_ring_full_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
+ htt_stats_buf->not_to_fw);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ htt_stats_buf->invalid_pdev);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+ char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
+ htt_stats_buf->base_addr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
+ htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
+ (htt_stats_buf->num_elems__prefetch_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
+ htt_stats_buf->head_idx__tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
+ (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
+ (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
+ htt_stats_buf->num_tail_incr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
+ htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
+ (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
+ htt_stats_buf->overrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
+ htt_stats_buf->underrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
+ htt_stats_buf->prod_blockwait_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
+ htt_stats_buf->cons_blockwait_count);
+
+ ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
+ HTT_STATS_LOW_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
+ low_wm_hit_count);
+
+ ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
+ HTT_STATS_HIGH_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
+ high_wm_hit_count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+
+ ARRAY_TO_STRING(dwords_used_by_user_n,
+ htt_stats_buf->dwords_used_by_user_n,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
+ dwords_used_by_user_n);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
+ htt_stats_buf->client_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
+ htt_stats_buf->buf_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
+ htt_stats_buf->buf_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
+ htt_stats_buf->buf_busy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
+ htt_stats_buf->buf_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
+ htt_stats_buf->buf_avail);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
+ htt_stats_buf->num_users);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
+ htt_stats_buf->buf_total);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
+ htt_stats_buf->mem_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
+ htt_stats_buf->deallocate_bufs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
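+ /* mac_id__ring_id__arena__ep packs four fields into one u32; from
+ * the masks below, bits 7:0 are mac_id, 15:8 ring_id, 23:16 arena,
+ * and bit 24 ep.
+ */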
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
+ htt_stats_buf->base_addr_lsb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
+ htt_stats_buf->base_addr_msb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
+ htt_stats_buf->ring_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
+ htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
+ (htt_stats_buf->num_avail_words__num_valid_words &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
+ htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
+ (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
+ htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
+ (htt_stats_buf->consumer_empty__producer_full &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
+ htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
+ (htt_stats_buf->prefetch_count__internal_tail_ptr &
+ 0xFFFF0000) >> 16);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+
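+ /* The per-GI scratch strings are heap-allocated with GFP_ATOMIC
+ * (this path apparently cannot sleep) rather than placed on the
+ * stack, since several HTT_MAX_STRING_LEN buffers would be too large
+ * there. Note the array is sized and freed by the PEER GI count while
+ * the print loops below use the PDEV GI count, which assumes the PDEV
+ * count is not larger.
+ */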
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
+ htt_stats_buf->rts_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
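+ /* Only tx_he_ltf[1..3] (1x/2x/4x LTF) are reported; index 0 appears
+ * to be unused by the firmware.
+ */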
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
+
+ /* SU GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AC MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AX MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* DL OFDMA GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
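+ /* The success path deliberately falls through to the label below so
+ * the tx_gi scratch buffers are freed on both paths.
+ */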
+fail:
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u16 index = 0;
+ char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_pilot_evm_db[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
+ htt_stats_buf->rssi_in_dbm);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
+ htt_stats_buf->nss_count);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
+ htt_stats_buf->pilot_count);
+
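+ /* ARRAY_TO_STRING is not used here: each pilot EVM sample is printed
+ * as an "index:value" pair, with the snprintf return value
+ * accumulated in index so successive pairs append to the same scratch
+ * string.
+ */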
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+
+ for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+ index += snprintf(&rx_pilot_evm_db[j][index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
+ j, rx_pilot_evm_db[j]);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
+ htt_stats_buf->txbf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
+ str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ htt_stats_buf->ul_ofdma_rx_stbc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ htt_stats_buf->ul_ofdma_rx_ldpc);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
+ str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
+ htt_stats_buf->per_chain_rssi_pkt_type);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
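+ /* As in the TX rate printer, the success path falls through so
+ * rssi_chain, rx_pilot_evm_db and rx_gi are freed on both paths.
+ */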
+fail:
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rx_pilot_evm_db[j]);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++)
+ kfree(rx_gi[j]);
+}
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_empty_cnt,
+ htt_stats_buf->refill_ring_empty_cnt,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
+ refill_ring_empty_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(rxdma_err_cnt,
+ htt_stats_buf->rxdma_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
+ rxdma_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(reo_err_cnt,
+ htt_stats_buf->reo_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
+ reo_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
+ htt_stats_buf->sample_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
+ htt_stats_buf->total_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
+ htt_stats_buf->total_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
+ htt_stats_buf->total_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
+ htt_stats_buf->non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
+ htt_stats_buf->non_zeros_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
+ htt_stats_buf->last_non_zeros_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min = %u",
+ htt_stats_buf->last_non_zeros_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg = %u",
+ htt_stats_buf->last_non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample = %u\n",
+ htt_stats_buf->last_non_zeros_sample);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_num_refill,
+ htt_stats_buf->refill_ring_num_refill,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
+ refill_ring_num_refill);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
+ char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
+ htt_stats_buf->ppdu_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
+ htt_stats_buf->udp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
+ htt_stats_buf->other_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ ARRAY_TO_STRING(fw_ring_mgmt_subtype,
+ htt_stats_buf->fw_ring_mgmt_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
+ fw_ring_mgmt_subtype);
+
+ ARRAY_TO_STRING(fw_ring_ctrl_subtype,
+ htt_stats_buf->fw_ring_ctrl_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
+ fw_ring_ctrl_subtype);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
+ htt_stats_buf->rx_suspend_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
+ htt_stats_buf->rx_resume_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
+ htt_stats_buf->rx_flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(fw_ring_mpdu_err,
+ htt_stats_buf->fw_ring_mpdu_err,
+ HTT_RX_STATS_RXDMA_MAX_ERR);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
+ fw_ring_mpdu_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+
+ ARRAY_TO_STRING(fw_mpdu_drop,
+ htt_stats_buf->fw_mpdu_drop,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
+ htt_stats_buf->mac_id__word);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
+ htt_stats_buf->total_phy_err_cnt);
+
+ ARRAY_TO_STRING(phy_errs,
+ htt_stats_buf->phy_err,
+ HTT_STATS_PHY_ERR_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
+ htt_stats_buf->chan_num);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
+ htt_stats_buf->num_records);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
+ htt_stats_buf->collection_interval);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
+ htt_stats_buf->hwsch_reset_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
+ htt_stats_buf->sch_selfgen_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ htt_stats_buf->pdev_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
+ htt_stats_buf->num_sessions);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_id);
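+ /* The firmware reports the MAC address as two little-endian words:
+ * octets 0-3 in mac_addr_l32 and octets 4-5 in the low half of
+ * mac_addr_h16; unpack them byte by byte for printing.
+ */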
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
+ htt_stats_buf->flow_id_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
+ htt_stats_buf->dialog_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
+ htt_stats_buf->wake_dura_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
+ htt_stats_buf->wake_intvl_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
+ htt_stats_buf->sp_offset_us);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ htt_stats_buf->num_obss_tx_ppdu_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 tlv_len = 0, i = 0, word_len = 0;
+
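+ /* Round the TLV length (header included) up to whole 32-bit words,
+ * e.g. tlv_len = 10 bytes gives word_len = 3.
+ */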
+ tlv_len = FIELD_GET(HTT_TLV_LEN, *tag_buf) + HTT_TLV_HDR_LEN;
+ word_len = (tlv_len % 4) == 0 ? (tlv_len / 4) : ((tlv_len / 4) + 1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HKDBG TLV DUMP: (tag_len=%u bytes, words=%u)",
+ tlv_len, word_len);
+
+ for (i = 0; i + 3 < word_len; i += 4) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "0x%08x 0x%08x 0x%08x 0x%08x",
+ tag_buf[i], tag_buf[i + 1],
+ tag_buf[i + 2], tag_buf[i + 3]);
+ }
+
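+ /* Print the remaining one to three words that did not fill a
+ * complete line of four.
+ */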
+ if (i + 3 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]);
+ } else if (i + 2 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1]);
+ } else if (i + 1 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x ",
+ tag_buf[i]);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+ htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_STRING_TAG:
+ htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+ htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+ htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+ htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+ htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+ htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CMN_TAG:
+ htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_TAG:
+ htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CMN_TAG:
+ htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SRING_STATS_TAG:
+ htt_print_sring_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_STATS_TAG:
+ htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+ htt_print_rx_reo_debug_stats_tlv_v(tag_buf, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+ htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_CMN_TAG:
+ htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_TAG:
+ htt_print_sfm_client_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SRING_CMN_TAG:
+ htt_print_sring_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_WD_TIMEOUT_TAG:
+ htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_COUNTER_NAME_TAG:
+ htt_print_counter_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_TAG:
+ htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+ htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_TID_DETAILS_TAG:
+ htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_STATS_CMN_TAG:
+ htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_DETAILS_TAG:
+ htt_print_peer_details_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+ htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+ htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+ htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_WHAL_TX_TAG:
+ htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+ htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSION_TAG:
+ htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath11k *ar;
+ u32 len;
+ u64 cookie;
+ int ret;
+ u8 pdev_id;
+
+ msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
+ cookie = msg->cookie;
+
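+ /* The upper 32 bits of the cookie carry HTT_STATS_MAGIC_VALUE and
+ * the lower 32 bits the pdev id, mirroring what
+ * ath11k_dbg_htt_stats_req() put in the request.
+ */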
+ if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
+ ath11k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ rcu_read_unlock();
+ if (!ar) {
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ return;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ return;
+
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (stats_req->done) {
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ return;
+ }
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+ len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ complete(&stats_req->cmpln);
+}
+
+static ssize_t ath11k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
+ return -E2BIG;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ return -EPERM;
+
+ ar->debug.htt_stats.type = type;
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath11k_read_htt_stats_type,
+ .write = ath11k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
+ const u8 *mac_addr,
+ struct htt_ext_stats_cfg_params *cfg_params)
+{
+ if (!cfg_params)
+ return -EINVAL;
+
+ switch (type) {
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+ case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+ cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
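+ /* Pack the peer MAC address into the config words: octets 0-3
+ * into cfg2 and octets 4-5 into cfg3, one octet per byte lane.
+ */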
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+ case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ u8 type = stats_req->type;
+ u64 cookie = 0;
+ int ret, pdev_id = ar->pdev->pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ init_completion(&stats_req->cmpln);
+
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+ FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+ ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+ &cfg_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
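+ /* Wait up to three seconds for the event handler to complete the
+ * request; on timeout, mark it done ourselves so that a late
+ * response is discarded by the handler.
+ */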
+ while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ ath11k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ }
+
+ return 0;
+}
+
+static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ u8 type = ar->debug.htt_stats.type;
+ int ret;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
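+ /* Allocate the request descriptor together with the output buffer
+ * that the TLV printers fill.
+ */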
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ return ret;
+}
+
+static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath11k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
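+ /* Clamp defensively in case a truncated dump recorded a length at
+ * or past the buffer end.
+ */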
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath11k_open_htt_stats,
+ .release = ath11k_release_htt_stats,
+ .read = ath11k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_htt_stats_reset(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
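+ /* cfg0 carries the reset start offset and cfg1 a single bit,
+ * derived from that offset and the stats type, selecting what to
+ * reset.
+ */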
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .read = ath11k_read_htt_stats_reset,
+ .write = ath11k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar)
+{
+ spin_lock_init(&ar->debug.htt_stats.lock);
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
new file mode 100644
index 000000000000..4bdb62dd7b8d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -0,0 +1,1662 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+
+enum htt_tlv_tag_t {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_PDEV_PHY_ERR_TAG = 4,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
+ HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG = 7,
+ HTT_STATS_TX_HWQ_CMD_RESULT_TAG = 8,
+ HTT_STATS_TX_HWQ_CMD_STALL_TAG = 9,
+ HTT_STATS_TX_HWQ_FES_STATUS_TAG = 10,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_TQM_CMDQ_STATUS_TAG = 16,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_RING_IF_TAG = 24,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
+ HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG = 29,
+ HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG = 30,
+ HTT_STATS_RX_SOC_FW_STATS_TAG = 31,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG = 32,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG = 33,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG = 38,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_RING_IF_CMN_TAG = 40,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_SRING_CMN_TAG = 45,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG = 51,
+ HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG = 52,
+ HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG = 53,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_WD_TIMEOUT_TAG = 55,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_COUNTER_NAME_TAG = 57,
+ HTT_STATS_TX_TID_DETAILS_TAG = 58,
+ HTT_STATS_RX_TID_DETAILS_TAG = 59,
+ HTT_STATS_PEER_STATS_CMN_TAG = 60,
+ HTT_STATS_PEER_DETAILS_TAG = 61,
+ HTT_STATS_PEER_TX_RATE_STATS_TAG = 62,
+ HTT_STATS_PEER_RX_RATE_STATS_TAG = 63,
+ HTT_STATS_PEER_MSDU_FLOWQ_TAG = 64,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68,
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75,
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76,
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77,
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78,
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
+ HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG = 81,
+ HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG = 82,
+ HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG = 83,
+ HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG = 84,
+ HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG = 85,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32 4
+#define HTT_STATS_MACID_INVALID 0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS 10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS 13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS 5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS 10
+
+enum htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+#define HTT_RX_STATS_REFILL_MAX_RING 4
+#define HTT_RX_STATS_RXDMA_MAX_ERR 16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX 16
+
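+/* The printers in debug_htt_stats.c size the variable-length TLV arrays
+ * from the received TLV length, clamped to these bounds, e.g.
+ * num_elems = min_t(u16, tag_len >> 2, HTT_RX_STATS_REFILL_MAX_RING),
+ * so a shorter-than-expected TLV is handled safely.
+ */
+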
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
+struct htt_stats_string_tlv {
+ u32 data[0]; /* Can be variable length */
+} __packed;
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+ u32 mac_id__word;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 underrun;
+ u32 hw_paused;
+ u32 hw_flush;
+ u32 hw_filt;
+ u32 tx_abort;
+ u32 mpdu_requed;
+ u32 tx_xretry;
+ u32 data_rc;
+ u32 mpdu_dropped_xretry;
+ u32 illgl_rate_phy_err;
+ u32 cont_xretry;
+ u32 tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 seq_switch_hw_paused;
+ u32 next_seq_posted_dsr;
+ u32 seq_posted_isr;
+ u32 seq_ctrl_cached;
+ u32 mpdu_count_tqm;
+ u32 msdu_count_tqm;
+ u32 mpdu_removed_tqm;
+ u32 msdu_removed_tqm;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+ u32 mpdus_seq_hw_retry;
+ u32 ack_tlv_proc;
+ u32 coex_abort_mpdu_cnt_valid;
+ u32 coex_abort_mpdu_cnt;
+ u32 num_total_ppdus_tried_ota;
+ u32 num_data_ppdus_tried_ota;
+ u32 local_ctrl_mgmt_enqued;
+ u32 local_ctrl_mgmt_freed;
+ u32 local_data_enqued;
+ u32 local_data_freed;
+ u32 mpdu_tried;
+ u32 isr_wait_seq_posted;
+
+ u32 tx_active_dur_us_low;
+ u32 tx_active_dur_us_high;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+ u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+ u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+ u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+ u32 phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+ u32 sifs_hist_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS */
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+ u32 num_data_ppdus_legacy_su;
+ u32 num_data_ppdus_ac_su;
+ u32 num_data_ppdus_ax_su;
+ u32 num_data_ppdus_ac_su_txbf;
+ u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ * tried_mpdu_cnt_hist is the histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the
+ * HW had attempted to transmit on air, for the HWSCH Schedule
+ * command submitted by FW. It is not the retry attempts.
+ * The histogram bins are 0-29, 30-59, 60-89 and so on. There are
+ * 10 bins in this histogram. They are defined in FW using the
+ * following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
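+/* For example, a schedule command whose PPDU carried 45 tried MPDUs
+ * falls in bin 1, the 30-59 range, of tried_mpdu_cnt_hist.
+ */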
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+ /* Stored as little endian */
+ u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ u32 mask;
+ u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+ /* Stored as little endian */
+ u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+ u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+ u32 mac_id__word; /* BIT [ 7 : 0] : mac_id */
+ u32 tx_abort;
+ u32 tx_abort_fail_count;
+ u32 rx_abort;
+ u32 rx_abort_fail_count;
+ u32 warm_reset;
+ u32 cold_reset;
+ u32 tx_flush;
+ u32 tx_glb_reset;
+ u32 tx_txq_reset;
+ u32 rx_timeout_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+ u32 mac_id__word;
+ u32 last_unpause_ppdu_id;
+ u32 hwsch_unpause_wait_tqm_write;
+ u32 hwsch_dummy_tlv_skipped;
+ u32 hwsch_misaligned_offset_received;
+ u32 hwsch_reset_count;
+ u32 hwsch_dev_reset_war;
+ u32 hwsch_delayed_pause;
+ u32 hwsch_long_delayed_pause;
+ u32 sch_rx_ppdu_no_response;
+ u32 sch_selfgen_response;
+ u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+struct htt_msdu_flow_stats_tlv {
+ u32 last_update_timestamp;
+ u32 last_add_timestamp;
+ u32 last_remove_timestamp;
+ u32 total_processed_msdu_count;
+ u32 cur_msdu_count_in_flowq;
+ u32 sw_peer_id;
+ u32 tx_flow_no__tid_num__drop_rule;
+ u32 last_cycle_enqueue_count;
+ u32 last_cycle_dequeue_count;
+ u32 last_cycle_drop_count;
+ u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 mpdus_hw_filter;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+};
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 max_qdepth_bytes;
+ u32 max_qdepth_n_msdus;
+ u32 rsvd;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+ u32 allow_n_flags;
+ u32 sendn_frms_allowed;
+};
+
+struct htt_rx_tid_stats_tlv {
+ u32 sw_peer_id__tid_num;
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 dup_in_reorder;
+ u32 dup_past_outside_window;
+ u32 dup_past_within_window;
+ u32 rxdesc_err_decrypt;
+ u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+ u8 counter_name[HTT_MAX_COUNTER_NAME];
+ u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+ u32 ppdu_cnt;
+ u32 mpdu_cnt;
+ u32 msdu_cnt;
+ u32 pause_bitmap;
+ u32 block_bitmap;
+ u32 current_timestamp;
+ u32 peer_tx_airtime;
+ u32 peer_rx_airtime;
+ s32 rssi;
+ u32 peer_enqueued_count_low;
+ u32 peer_enqueued_count_high;
+ u32 peer_dequeued_count_low;
+ u32 peer_dequeued_count_high;
+ u32 peer_dropped_count_low;
+ u32 peer_dropped_count_high;
+ u32 ppdu_transmitted_bytes_low;
+ u32 ppdu_transmitted_bytes_high;
+ u32 peer_ttl_removed_count;
+ u32 inactive_time;
+};
+
+struct htt_peer_details_tlv {
+ u32 peer_type;
+ u32 sw_peer_id;
+ u32 vdev_pdev_ast_idx;
+ struct htt_mac_addr mac_addr;
+ u32 peer_flags;
+ u32 qpeer_flags;
+};
+
+enum htt_stats_param_type {
+ HTT_STATS_PREAM_OFDM,
+ HTT_STATS_PREAM_CCK,
+ HTT_STATS_PREAM_HT,
+ HTT_STATS_PREAM_VHT,
+ HTT_STATS_PREAM_HE,
+ HTT_STATS_PREAM_RSVD,
+ HTT_STATS_PREAM_RSVD1,
+
+ HTT_STATS_PREAM_COUNT,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets in each GI
+ * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+ u32 nsts;
+
+ /* Number of rx ldpc packets */
+ u32 rx_ldpc;
+ /* Number of rx rts packets */
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+ /* units = dB above noise floor */
+ u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+ /* Counters to track number of rx packets in each GI in each mcs (0-11) */
+ u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+ HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+ HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+ HTT_PEER_STATS_CMN_TLV = 0,
+ HTT_PEER_DETAILS_TLV = 1,
+ HTT_TX_PEER_RATE_STATS_TLV = 2,
+ HTT_RX_PEER_RATE_STATS_TLV = 3,
+ HTT_TX_TID_STATS_TLV = 4,
+ HTT_RX_TID_STATS_TLV = 5,
+ HTT_MSDU_FLOW_STATS_TLV = 6,
+
+ HTT_PEER_STATS_MAX_TLV = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+};
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+ u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+ u32 mac_id__hwq_id__word;
+
+ /* PPDU level stats */
+ u32 xretry;
+ u32 underrun_cnt;
+ u32 flush_cnt;
+ u32 filt_cnt;
+ u32 null_mpdu_bmap;
+ u32 user_ack_failure;
+ u32 ack_tlv_proc;
+ u32 sched_id_proc;
+ u32 null_mpdu_tx_count;
+ u32 mpdu_bmap_not_recvd;
+
+ /* Selfgen stats per hwQ */
+ u32 num_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+
+ /* MPDU level stats */
+ u32 mpdu_tried_cnt;
+ u32 mpdu_queued_cnt;
+ u32 mpdu_ack_fail_cnt;
+ u32 mpdu_filt_cnt;
+ u32 false_mpdu_ack_count;
+
+ u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+ u32 hist_intvl;
+ /* histogram of ppdu post to hwsch -> cmd status received */
+ u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+ /* Histogram of sched cmd result */
+ u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+ /* Histogram of various pause conditions */
+ u32 cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+ /* Histogram of number of user FES results */
+ u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the HW
+ * had attempted to transmit on air, for the HWSCH Schedule command
+ * submitted by FW in this HWQ. It is not the retry attempts. The
+ * histogram bins are 0-29, 30-59, 60-89 and so on. There are 10 bins
+ * in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ /* Histogram of number of mpdus on tried mpdu */
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * increment the corresponding bin.
+ * Each bin represents 1 ms and there are 10 bins in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 (1 ms)
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+ /* Histogram of txop used cnt */
+ u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+};
+
+/* == TX SELFGEN STATS == */
+struct htt_tx_selfgen_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 su_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+ u32 delayed_bar_1; /* MU user 1 */
+ u32 delayed_bar_2; /* MU user 2 */
+ u32 delayed_bar_3; /* MU user 3 */
+ u32 delayed_bar_4; /* MU user 4 */
+ u32 delayed_bar_5; /* MU user 5 */
+ u32 delayed_bar_6; /* MU user 6 */
+ u32 delayed_bar_7; /* MU user 7 */
+};
+
+struct htt_tx_selfgen_ac_stats_tlv {
+ /* 11AC */
+ u32 ac_su_ndpa;
+ u32 ac_su_ndp;
+ u32 ac_mu_mimo_ndpa;
+ u32 ac_mu_mimo_ndp;
+ u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+};
+
+struct htt_tx_selfgen_ax_stats_tlv {
+ /* 11AX */
+ u32 ax_su_ndpa;
+ u32 ax_su_ndp;
+ u32 ax_mu_mimo_ndpa;
+ u32 ax_mu_mimo_ndp;
+ u32 ax_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ax_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ax_mu_mimo_brpoll_3; /* MU user 3 */
+ u32 ax_mu_mimo_brpoll_4; /* MU user 4 */
+ u32 ax_mu_mimo_brpoll_5; /* MU user 5 */
+ u32 ax_mu_mimo_brpoll_6; /* MU user 6 */
+ u32 ax_mu_mimo_brpoll_7; /* MU user 7 */
+ u32 ax_basic_trigger;
+ u32 ax_bsr_trigger;
+ u32 ax_mu_bar_trigger;
+ u32 ax_mu_rts_trigger;
+};
+
+struct htt_tx_selfgen_ac_err_stats_tlv {
+ /* 11AC error stats */
+ u32 ac_su_ndp_err;
+ u32 ac_su_ndpa_err;
+ u32 ac_mu_mimo_ndpa_err;
+ u32 ac_mu_mimo_ndp_err;
+ u32 ac_mu_mimo_brp1_err;
+ u32 ac_mu_mimo_brp2_err;
+ u32 ac_mu_mimo_brp3_err;
+};
+
+struct htt_tx_selfgen_ax_err_stats_tlv {
+ /* 11AX error stats */
+ u32 ax_su_ndp_err;
+ u32 ax_su_ndpa_err;
+ u32 ax_mu_mimo_ndpa_err;
+ u32 ax_mu_mimo_ndp_err;
+ u32 ax_mu_mimo_brp1_err;
+ u32 ax_mu_mimo_brp2_err;
+ u32 ax_mu_mimo_brp3_err;
+ u32 ax_mu_mimo_brp4_err;
+ u32 ax_mu_mimo_brp5_err;
+ u32 ax_mu_mimo_brp6_err;
+ u32 ax_mu_mimo_brp7_err;
+ u32 ax_basic_trigger_err;
+ u32 ax_bsr_trigger_err;
+ u32 ax_mu_bar_trigger_err;
+ u32 ax_mu_rts_trigger_err;
+};
+
+/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ /* mu-mimo sw sched cmd stats */
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ /* MU PPDU stats per hwQ */
+ u32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
+ */
+ u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_mu_mimo_mpdus_queued_usr;
+ u32 ax_mu_mimo_mpdus_tried_usr;
+ u32 ax_mu_mimo_mpdus_failed_usr;
+ u32 ax_mu_mimo_mpdus_requeued_usr;
+ u32 ax_mu_mimo_err_no_ba_usr;
+ u32 ax_mu_mimo_mpdu_underrun_usr;
+ u32 ax_mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_ofdma_mpdus_queued_usr;
+ u32 ax_ofdma_mpdus_tried_usr;
+ u32 ax_ofdma_mpdus_failed_usr;
+ u32 ax_ofdma_mpdus_requeued_usr;
+ u32 ax_ofdma_err_no_ba_usr;
+ u32 ax_ofdma_mpdu_underrun_usr;
+ u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+struct htt_tx_pdev_mpdu_stats_tlv {
+ /* mpdu level stats */
+ u32 mpdus_queued_usr;
+ u32 mpdus_tried_usr;
+ u32 mpdus_failed_usr;
+ u32 mpdus_requeued_usr;
+ u32 err_no_ba_usr;
+ u32 mpdu_underrun_usr;
+ u32 ampdu_underrun_usr;
+ u32 user_index;
+ u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+ u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+ u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+ u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+};
+
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+ HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+ HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+ HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+ HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+ HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+ HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+ HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+ HTT_SCHED_TID_SKIP_NO_ENQ,
+ HTT_SCHED_TID_SKIP_LOW_ENQ,
+ HTT_SCHED_TID_SKIP_PAUSED,
+ HTT_SCHED_TID_SKIP_UL,
+ HTT_SCHED_TID_REMOVE_PAUSED,
+ HTT_SCHED_TID_REMOVE_NO_ENQ,
+ HTT_SCHED_TID_REMOVE_UL,
+ HTT_SCHED_TID_QUERY,
+ HTT_SCHED_TID_SU_ONLY,
+ HTT_SCHED_TID_ELIGIBLE,
+ HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+ /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+ u32 sched_ineligibility[0];
+};
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+ u32 mac_id__txq_id__word;
+ u32 sched_policy;
+ u32 last_sched_cmd_posted_timestamp;
+ u32 last_sched_cmd_compl_timestamp;
+ u32 sched_2_tac_lwm_count;
+ u32 sched_2_tac_ring_full;
+ u32 sched_cmd_post_failure;
+ u32 num_active_tids;
+ u32 num_ps_schedules;
+ u32 sched_cmds_pending;
+ u32 num_tid_register;
+ u32 num_tid_unregister;
+ u32 num_qstats_queried;
+ u32 qstats_update_pending;
+ u32 last_qstats_query_timestamp;
+ u32 num_tqm_cmdq_full;
+ u32 num_de_sched_algo_trigger;
+ u32 num_rt_sched_algo_trigger;
+ u32 num_tqm_sched_algo_trigger;
+ u32 notify_sched;
+ u32 dur_based_sendn_term;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ u32 mac_id__word;
+ /* Current timestamp */
+ u32 current_timestamp;
+};
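+
+/*
+ * Sketch of unpacking the mac_id bit-field documented above; the
+ * helper name is hypothetical, not part of this patch.
+ */
+static inline u32 htt_stats_tx_sched_cmn_mac_id(u32 mac_id__word)
+{
+ return mac_id__word & 0xff; /* BIT [7:0] */
+}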
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+ u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+ u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+ u32 list_mpdu_cnt_hist[0];
+ /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+};
+
+struct htt_tx_tqm_pdev_stats_tlv_v {
+ u32 msdu_count;
+ u32 mpdu_count;
+ u32 remove_msdu;
+ u32 remove_mpdu;
+ u32 remove_msdu_ttl;
+ u32 send_bar;
+ u32 bar_sync;
+ u32 notify_mpdu;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 hwsch_trigger;
+ u32 ack_tlv_proc;
+ u32 gen_mpdu_cmd;
+ u32 gen_list_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_mpdu_tried_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_msdu_cmd;
+ u32 remove_msdu_ttl_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 enqueue;
+ u32 enqueue_notify;
+ u32 notify_mpdu_at_head;
+ u32 notify_mpdu_state_valid;
+ /*
+ * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM (sent when MSDUs are
+ * enqueued and the flow becomes non-empty), the notify counter is
+ * incremented if the number of MSDUs exceeds the threshold. UDP_THRESH
+ * counters are for UDP MSDUs and NONUDP counters are for non-UDP MSDUs.
+ * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold - sched_udp_notify1 is incremented
+ * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold - sched_udp_notify2 is incremented
+ * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+ * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+ *
+ * Notify signifies that we trigger the scheduler.
+ */
+ u32 sched_udp_notify1;
+ u32 sched_udp_notify2;
+ u32 sched_nonudp_notify1;
+ u32 sched_nonudp_notify2;
+};
+
+struct htt_tx_tqm_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 max_cmdq_id;
+ u32 list_mpdu_cnt_hist_intvl;
+
+ /* Global stats */
+ u32 add_msdu;
+ u32 q_empty;
+ u32 q_not_empty;
+ u32 drop_notification;
+ u32 desc_threshold;
+};
+
+struct htt_tx_tqm_error_stats_tlv {
+ /* Error stats */
+ u32 q_empty_failure;
+ u32 q_not_empty_failure;
+ u32 add_msdu_failure;
+};
+
+/* == TQM CMDQ stats == */
+struct htt_tx_tqm_cmdq_status_tlv {
+ u32 mac_id__cmdq_id__word;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 gen_mpdu_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_msdu_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+struct htt_tx_de_eapol_packets_stats_tlv {
+ u32 m1_packets;
+ u32 m2_packets;
+ u32 m3_packets;
+ u32 m4_packets;
+ u32 g1_packets;
+ u32 g2_packets;
+};
+
+struct htt_tx_de_classify_failed_stats_tlv {
+ u32 ap_bss_peer_not_found;
+ u32 ap_bcast_mcast_no_peer;
+ u32 sta_delete_in_progress;
+ u32 ibss_no_bss_peer;
+ u32 invalid_vdev_type;
+ u32 invalid_ast_peer_entry;
+ u32 peer_entry_invalid;
+ u32 ethertype_not_ip;
+ u32 eapol_lookup_failed;
+ u32 qpeer_not_allow_data;
+ u32 fse_tid_override;
+ u32 ipv6_jumbogram_zero_length;
+ u32 qos_to_non_qos_in_prog;
+};
+
+struct htt_tx_de_classify_stats_tlv {
+ u32 arp_packets;
+ u32 igmp_packets;
+ u32 dhcp_packets;
+ u32 host_inspected;
+ u32 htt_included;
+ u32 htt_valid_mcs;
+ u32 htt_valid_nss;
+ u32 htt_valid_preamble_type;
+ u32 htt_valid_chainmask;
+ u32 htt_valid_guard_interval;
+ u32 htt_valid_retries;
+ u32 htt_valid_bw_info;
+ u32 htt_valid_power;
+ u32 htt_valid_key_flags;
+ u32 htt_valid_no_encryption;
+ u32 fse_entry_count;
+ u32 fse_priority_be;
+ u32 fse_priority_high;
+ u32 fse_priority_low;
+ u32 fse_traffic_ptrn_be;
+ u32 fse_traffic_ptrn_over_sub;
+ u32 fse_traffic_ptrn_bursty;
+ u32 fse_traffic_ptrn_interactive;
+ u32 fse_traffic_ptrn_periodic;
+ u32 fse_hwqueue_alloc;
+ u32 fse_hwqueue_created;
+ u32 fse_hwqueue_send_to_host;
+ u32 mcast_entry;
+ u32 bcast_entry;
+ u32 htt_update_peer_cache;
+ u32 htt_learning_frame;
+ u32 fse_invalid_peer;
+ /*
+ * mec_notify is HTT TX WBM multicast echo check notification
+ * from firmware to host. FW sends SA addresses to host for all
+ * multicast/broadcast packets received on STA side.
+ */
+ u32 mec_notify;
+};
+
+struct htt_tx_de_classify_status_stats_tlv {
+ u32 eok;
+ u32 classify_done;
+ u32 lookup_failed;
+ u32 send_host_dhcp;
+ u32 send_host_mcast;
+ u32 send_host_unknown_dest;
+ u32 send_host;
+ u32 status_invalid;
+};
+
+struct htt_tx_de_enqueue_packets_stats_tlv {
+ u32 enqueued_pkts;
+ u32 to_tqm;
+ u32 to_tqm_bypass;
+};
+
+struct htt_tx_de_enqueue_discard_stats_tlv {
+ u32 discarded_pkts;
+ u32 local_frames;
+ u32 is_ext_msdu;
+};
+
+struct htt_tx_de_compl_stats_tlv {
+ u32 tcl_dummy_frame;
+ u32 tqm_dummy_frame;
+ u32 tqm_notify_frame;
+ u32 fw2wbm_enq;
+ u32 tqm_bypass_frame;
+};
+
+/*
+ * The htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of the time
+ * spent waiting for a buffer in the FW2WBM release ring. A buffer
+ * request may fail when no buffer is available, in which case FW sleeps
+ * for 200us and requests again. This is a histogram of that wait time,
+ * with a bin size of 200ms and 10 bins (2 seconds max).
+ * They are defined by the following macros in FW
+ * #define ENTRIES_PER_BIN_COUNT 1000 // per bin 1000 * 200us = 200ms
+ * #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ * ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+ u32 fw2wbm_ring_full_hist[0];
+};
+
+struct htt_tx_de_cmn_stats_tlv {
+ u32 mac_id__word;
+
+ /* Global Stats */
+ u32 tcl2fw_entry_count;
+ u32 not_to_fw;
+ u32 invalid_pdev_vdev_peer;
+ u32 tcl_res_invalid_addrx;
+ u32 wbm2fw_entry_count;
+ u32 invalid_pdev;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS 5
+#define HTT_STATS_HIGH_WM_BINS 5
+
+struct htt_ring_if_stats_tlv {
+ u32 base_addr; /* DWORD aligned base memory address of the ring */
+ u32 elem_size;
+ u32 num_elems__prefetch_tail_idx;
+ u32 head_idx__tail_idx;
+ u32 shadow_head_idx__shadow_tail_idx;
+ u32 num_tail_incr;
+ u32 lwm_thresh__hwm_thresh;
+ u32 overrun_hit_count;
+ u32 underrun_hit_count;
+ u32 prod_blockwait_count;
+ u32 cons_blockwait_count;
+ u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+ u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+struct htt_ring_if_cmn_tlv {
+ u32 mac_id__word;
+ u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+ /* Number of DWORDS used per user and per client */
+ u32 dwords_used_by_user_n[0];
+};
+
+struct htt_sfm_client_tlv {
+ /* Client ID */
+ u32 client_id;
+ /* Minimum number of buffers */
+ u32 buf_min;
+ /* Maximum number of buffers */
+ u32 buf_max;
+ /* Number of Busy buffers */
+ u32 buf_busy;
+ /* Number of Allocated buffers */
+ u32 buf_alloc;
+ /* Number of Available/Usable buffers */
+ u32 buf_avail;
+ /* Number of users */
+ u32 num_users;
+};
+
+struct htt_sfm_cmn_tlv {
+ u32 mac_id__word;
+ /* Indicates the total number of 128 byte buffers
+ * in the CMEM that are available for buffer sharing
+ */
+ u32 buf_total;
+ /* Indicates that for a certain client, or for all clients,
+ * there is no dword saved in SFM, refer to SFM_R1_MEM_EMPTY
+ */
+ u32 mem_empty;
+ /* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+ u32 deallocate_bufs;
+ /* Number of Records */
+ u32 num_records;
+};
+
+/* == SRNG STATS == */
+struct htt_sring_stats_tlv {
+ u32 mac_id__ring_id__arena__ep;
+ u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+ u32 base_addr_msb;
+ u32 ring_size;
+ u32 elem_size;
+
+ u32 num_avail_words__num_valid_words;
+ u32 head_ptr__tail_ptr;
+ u32 consumer_empty__producer_full;
+ u32 prefetch_count__internal_tail_ptr;
+};
+
+struct htt_sring_cmn_tlv {
+ u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
+
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+struct htt_tx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets
+ * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ /* Number of CTS-acknowledged RTS packets */
+ u32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ u32 ac_mu_mimo_tx_ldpc;
+ u32 ax_mu_mimo_tx_ldpc;
+ u32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+};
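+
+/*
+ * Illustrative indexing for the two-dimensional GI counters above:
+ * tx_gi[gi][mcs], where gi selects 400us/800us/1600us/3200us and mcs
+ * is the MCS index (0-11).
+ */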
+
+/* == PDEV RX RATE CTRL STATS == */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+
+struct htt_rx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 nsts;
+
+ u32 rx_ldpc;
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ /* units = dB above noise floor */
+
+ /* Counters to track number of rx packets
+ * in each GI in each mcs (0-11)
+ */
+ u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ s32 rssi_in_dbm; /* rx Signal Strength value in dBm unit */
+
+ u32 rx_11ax_su_ext;
+ u32 rx_11ac_mumimo;
+ u32 rx_11ax_mumimo;
+ u32 rx_11ax_ofdma;
+ u32 txbf;
+ u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ u32 rx_active_dur_us_low;
+ u32 rx_active_dur_us_high;
+
+ u32 rx_11ax_ul_ofdma;
+
+ u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ul_ofdma_rx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ul_ofdma_rx_stbc;
+ u32 ul_ofdma_rx_ldpc;
+
+ /* record the stats for each user index */
+ u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+ u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+
+ u32 nss_count;
+ u32 pilot_count;
+ /* RxEVM stats in dB */
+ s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+ /* rx_pilot_evm_db_mean:
+ * EVM mean across pilots, computed as
+ * mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+ */
+ s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+ /* per_chain_rssi_pkt_type:
+ * This field shows what type of rx frame the per-chain RSSI was computed
+ * on, by recording the frame type and sub-type as bit-fields within this
+ * field:
+ * BIT [3 : 0] :- IEEE80211_FC0_TYPE
+ * BIT [7 : 4] :- IEEE80211_FC0_SUBTYPE
+ * BIT [31 : 8] :- Reserved
+ */
+ u32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+};
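+
+/*
+ * Sketch of decoding per_chain_rssi_pkt_type as documented above; the
+ * helper names are hypothetical, not part of this patch.
+ */
+static inline u32 htt_rx_rssi_pkt_fc0_type(u32 pkt_type)
+{
+ return pkt_type & 0xf; /* BIT [3:0] IEEE80211_FC0_TYPE */
+}
+
+static inline u32 htt_rx_rssi_pkt_fc0_subtype(u32 pkt_type)
+{
+ return (pkt_type >> 4) & 0xf; /* BIT [7:4] IEEE80211_FC0_SUBTYPE */
+}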
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+ u32 fw_reo_ring_data_msdu;
+ u32 fw_to_host_data_msdu_bcmc;
+ u32 fw_to_host_data_msdu_uc;
+ u32 ofld_remote_data_buf_recycle_cnt;
+ u32 ofld_remote_free_buf_indication_cnt;
+
+ u32 ofld_buf_to_host_data_msdu_uc;
+ u32 reo_fw_ring_to_host_data_msdu_uc;
+
+ u32 wbm_sw_ring_reap;
+ u32 wbm_forward_to_host_cnt;
+ u32 wbm_target_recycle_cnt;
+
+ u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+ u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+ u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+ u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
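+
+/*
+ * Illustrative read-back of the counters above (a sketch, not part of
+ * the patch): the array is indexed by htt_rx_rxdma_error_code_enum,
+ * e.g.
+ *
+ * fcs_errs = tlv->rxdma_err[HTT_RX_RXDMA_FCS_ERR];
+ */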
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+ u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX 16
+
+struct htt_rx_pdev_fw_stats_tlv {
+ u32 mac_id__word;
+ u32 ppdu_recvd;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 tcp_msdu_cnt;
+ u32 tcp_ack_msdu_cnt;
+ u32 udp_msdu_cnt;
+ u32 other_msdu_cnt;
+ u32 fw_ring_mpdu_ind;
+ u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_mcast_data_msdu;
+ u32 fw_ring_bcast_data_msdu;
+ u32 fw_ring_ucast_data_msdu;
+ u32 fw_ring_null_data_msdu;
+ u32 fw_ring_mpdu_drop;
+ u32 ofld_local_data_ind_cnt;
+ u32 ofld_local_data_buf_recycle_cnt;
+ u32 drx_local_data_ind_cnt;
+ u32 drx_local_data_buf_recycle_cnt;
+ u32 local_nondata_ind_cnt;
+ u32 local_nondata_buf_recycle_cnt;
+
+ u32 fw_status_buf_ring_refill_cnt;
+ u32 fw_status_buf_ring_empty_cnt;
+ u32 fw_pkt_buf_ring_refill_cnt;
+ u32 fw_pkt_buf_ring_empty_cnt;
+ u32 fw_link_buf_ring_refill_cnt;
+ u32 fw_link_buf_ring_empty_cnt;
+
+ u32 host_pkt_buf_ring_refill_cnt;
+ u32 host_pkt_buf_ring_empty_cnt;
+ u32 mon_pkt_buf_ring_refill_cnt;
+ u32 mon_pkt_buf_ring_empty_cnt;
+ u32 mon_status_buf_ring_refill_cnt;
+ u32 mon_status_buf_ring_empty_cnt;
+ u32 mon_desc_buf_ring_refill_cnt;
+ u32 mon_desc_buf_ring_empty_cnt;
+ u32 mon_dest_ring_update_cnt;
+ u32 mon_dest_ring_full_cnt;
+
+ u32 rx_suspend_cnt;
+ u32 rx_suspend_fail_cnt;
+ u32 rx_resume_cnt;
+ u32 rx_resume_fail_cnt;
+ u32 rx_ring_switch_cnt;
+ u32 rx_ring_restore_cnt;
+ u32 rx_flush_cnt;
+ u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+ u32 mac_id__word;
+ u32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+ /* Num error MPDU for each RxDMA error type */
+ u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+ /* Num MPDU dropped */
+ u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+ /* Below values are obtained from the HW Cycles counter registers */
+ u32 tx_frame_usec;
+ u32 rx_frame_usec;
+ u32 rx_clear_usec;
+ u32 my_rx_frame_usec;
+ u32 usec_cnt;
+ u32 med_rx_idle_usec;
+ u32 med_tx_idle_global_usec;
+ u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+ u32 chan_num;
+ /* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv)*/
+ u32 num_records;
+ u32 valid_cca_counters_bitmap;
+ u32 collection_interval;
+
+ /* This TLV is followed by an array containing the CCA stats
+ * collected over the last N intervals, when the indication covers
+ * the last N intervals of CCA stats. pdev_cca_stats[0] then holds
+ * the oldest CCA stats and pdev_cca_stats[N-1] the most recent:
+ * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+ */
+};
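+
+/*
+ * Illustrative walk of the trailing records described above, oldest
+ * first; tlv, cca_hist and process_cca() are assumptions of this
+ * sketch, not part of the patch:
+ *
+ * for (i = 0; i < tlv->num_records; i++)
+ *  process_cca(&cca_hist[i]);
+ */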
+
+struct htt_pdev_stats_twt_session_tlv {
+ u32 vdev_id;
+ struct htt_mac_addr peer_mac;
+ u32 flow_id_flags;
+
+ /* TWT_DIALOG_ID_UNAVAILABLE is used
+ * when TWT session is not initiated by host
+ */
+ u32 dialog_id;
+ u32 wake_dura_us;
+ u32 wake_intvl_us;
+ u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+ u32 pdev_id;
+ u32 num_sessions;
+ struct htt_pdev_stats_twt_session_tlv twt_session[0];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+ /* Total number of MSDUs buffered in AC */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+ /* Number of queue descriptors of this aging group */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+ /* Variable number of records, up to HTT_RX_REO_RESOURCE_STATS_MAX */
+ u32 sample_id;
+ u32 total_max;
+ u32 total_avg;
+ u32 total_sample;
+ u32 non_zeros_avg;
+ u32 non_zeros_sample;
+ u32 last_non_zeros_max;
+ u32 last_non_zeros_min;
+ u32 last_non_zeros_avg;
+ u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+ HTT_IMPLICIT_TXBF_STEER_STATS = 0,
+ HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1,
+ HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2,
+ HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3,
+ HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4,
+ HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+ HTT_TX_AC_SOUNDING_MODE = 0,
+ HTT_TX_AX_SOUNDING_MODE = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+ u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+ /* Counts number of soundings for all steering modes in each bw */
+ u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+ /*
+ * The sounding array is a 2-D array stored as a 1-D array of
+ * u32. The stats for a particular user/bw combination are
+ * referenced as follows:
+ *
+ * sounding[(user * max_bw) + bw]
+ *
+ * ... where max_bw == 4 for 160 MHz
+ */
+ u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
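+
+/*
+ * Worked example of the flattened indexing above (illustrative): with
+ * max_bw == 4, the 80 MHz (bw == 2) sounding count for user 3 lives at
+ * sounding[(3 * 4) + 2], i.e. sounding[14].
+ */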
+
+struct htt_pdev_obss_pd_stats_tlv {
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
new file mode 100644
index 000000000000..743760c9bcae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath11k_htt_tx_stats *tx_stats;
+ int gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+ mcs = txrate->mcs;
+ bw = txrate->bw;
+ nss = txrate->nss - 1;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
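+
+ /* For example, STATS_OP_FMT(SUCC).he[0][mcs] expands to
+ * tx_stats->stats[ATH11K_STATS_TYPE_SUCC].he[0][mcs]; index 0 of
+ * each counter pair accumulates bytes, index 1 packets.
+ */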
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+ }
+
+ if (peer_stats->is_ampdu) {
+ tx_stats->ba_fails += peer_stats->ba_fails;
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(AMPDU).he[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).he[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ tx_stats->ack_fails += peer_stats->ba_fails;
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+ tx_stats->tx_duration += peer_stats->duration;
+}
+
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate;
+ u8 rate_idx = 0; /* only set on the legacy path below */
+ int ret;
+ u8 mcs;
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_warn(ab, "failed to find the peer\n");
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs + 8 * (arsta->last_txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ /* TODO */
+ }
+
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+
+ ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_htt_data_stats *stats;
+ static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " HE MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->he[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1],
+ stats->gi[j][2], stats->gi[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH11K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath11k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ int len = 0, i, retval = 0;
+ const int size = 4096;
+ char *buf;
+
+ if (!rx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+ len += scnprintf(buf + len, size - len,
+ "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+ rx_stats->gi_count[0], rx_stats->gi_count[1],
+ rx_stats->gi_count[2], rx_stats->gi_count[3]);
+ len += scnprintf(buf + len, size - len,
+ "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
+ rx_stats->bw_count[0], rx_stats->bw_count[1],
+ rx_stats->bw_count[2], rx_stats->bw_count[3]);
+ len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
+ rx_stats->coding_count[0], rx_stats->coding_count[1]);
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+ len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
+ for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
+ len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
+ rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len, "\n");
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath11k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct debug_htt_stats_req *stats_req;
+ int ret;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ return ret;
+}
+
+static int
+ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_htt_peer_stats = {
+ .open = ath11k_dbg_sta_open_htt_peer_stats,
+ .release = ath11k_dbg_sta_release_htt_peer_stats,
+ .read = ath11k_dbg_sta_read_htt_peer_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ int ret, enable;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = kstrtoint_from_user(buf, count, 0, &enable);
+ if (ret)
+ goto out;
+
+ ar->debug.pktlog_peer_valid = enable;
+ memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
+
+ /* Send peer based pktlog enable/disable */
+ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+ sta->addr, ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
+ enable);
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[32] = {0};
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
+ ar->debug.pktlog_peer_valid,
+ ar->debug.pktlog_peer_addr);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_pktlog = {
+ .write = ath11k_dbg_sta_write_peer_pktlog,
+ .read = ath11k_dbg_sta_read_peer_pktlog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ if (ath11k_debug_is_extd_rx_stats_enabled(ar))
+ debugfs_create_file("rx_stats", 0400, dir, sta,
+ &fops_rx_stats);
+
+ debugfs_create_file("htt_peer_stats", 0400, dir, sta,
+ &fops_htt_peer_stats);
+
+ debugfs_create_file("peer_pktlog", 0644, dir, sta,
+ &fops_peer_pktlog);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
new file mode 100644
index 000000000000..b112825a52ed
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 reo_dest;
+ int ret;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
+ ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
+ HAL_DESC_REO_NON_QOS_TID, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
+ ret);
+ return ret;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+}
+
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath11k_hal_srng_get_entrysize(type);
+ int max_entries = ath11k_hal_srng_get_max_entries(type);
+ int ret;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
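+ /* vaddr/paddr now point at the aligned base inside the allocation;
+ * the HAL_RING_BASE_ALIGN - 1 bytes of slack requested above
+ * guarantee the aligned base still fits.
+ */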
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+ /* ring_num >= 3 shares the common thresholds below */
+ /* fall through */
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ case HAL_RXDMA_DIR_BUF:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ default:
+ ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ }
+ ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+}
+
+static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int i, ret;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, i, 0,
+ DP_TCL_DATA_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, i, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+ ath11k_hal_tx_init_data_ring(ab, srng);
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ 3, 0, DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath11k_hal_reo_init_cmd_ring(ab, srng);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ ath11k_hal_reo_hw_setup(ab);
+
+ return 0;
+
+err:
+ ath11k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
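+ /* Align the bank's virtual address to HAL_LINK_DESC_ALIGN and
+ * carry the same byte offset over to the DMA address so the two
+ * stay in sync.
+ */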
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath11k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath11k_dp_srng_cleanup(ab, ring);
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
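+ /* If the total is not already a power of two, round it up to the
+ * next one, e.g. 1536 -> 2048 (fls() returns the most significant
+ * set bit, counting from 1).
+ */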
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ u32 paddr;
+ u32 *desc;
+ int i, ret;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Setup scatter desc list when the total memory requirement is more */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
+ ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
+ i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0;
+ int tot_work_done = 0;
+
+ while (ath11k_tx_ring_mask[grp_id] >> i) {
+ if (ath11k_tx_ring_mask[grp_id] & BIT(i))
+ ath11k_dp_tx_completion_handler(ab, i);
+ i++;
+ }
+
+ if (ath11k_rx_err_ring_mask[grp_id]) {
+ work_done = ath11k_dp_process_rx_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
+ work_done = ath11k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ &irq_grp->pending_q,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (rx_mon_status_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (ath11k_reo_status_ring_mask[grp_id])
+ ath11k_dp_process_reo_status(ab);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+
+ if (budget <= 0)
+ goto done;
+
+ if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
+ struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
+ HAL_RX_BUF_RBM_SW3_BM,
+ GFP_ATOMIC);
+ }
+ }
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+void ath11k_dp_pdev_free(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ath11k_dp_rx_pdev_free(ab, i);
+ ath11k_debug_unregister(ar);
+ ath11k_dp_rx_pdev_mon_detach(ar);
+ }
+}
+
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+ }
+}
+
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+ /* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath11k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath11k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to initialize mon pdev %d\n",
+ i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_pdev_free(ab);
+
+ return ret;
+}
+
+int ath11k_dp_htt_connect(struct ath11k_dp *dp)
+{
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
+{
+ /* For STA mode, enable address search index,
+ * tcl uses ast_hash value in the descriptor.
+ */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
+ FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+ arvif->vdev_id) |
+ FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+ ar->pdev->pdev_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath11k_dp_update_vdev_search(arvif);
+}
+
+static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k_base *ab = (struct ath11k_base *)ctx;
+ struct sk_buff *msdu = skb;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+void ath11k_dp_free(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath11k_dp_srng_common_cleanup(ab);
+
+ ath11k_dp_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+ idr_for_each(&dp->tx_ring[i].txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&dp->tx_ring[i].txbuf_idr);
+ spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ kfree(dp->tx_ring[i].tx_status);
+ }
+
+ /* Deinit any SOC level resource */
+}
+
+int ath11k_dp_alloc(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_link_desc_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ idr_init(&dp->tx_ring[i].txbuf_idr);
+ spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status)
+ goto fail_cmn_srng_cleanup;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath11k_hal_tx_set_dscp_tid_map(ab, i);
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_cmn_srng_cleanup:
+ ath11k_dp_srng_common_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
new file mode 100644
index 000000000000..6ef5be4201b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -0,0 +1,1535 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_H
+#define ATH11K_DP_H
+
+#include "hal_rx.h"
+
+struct ath11k_base;
+struct ath11k_peer;
+struct ath11k_dp;
+struct ath11k_vif;
+struct hal_tcl_status_ring;
+struct ath11k_ext_irq_grp;
+
+struct dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+};
+
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+
+struct dp_reo_cache_flush_elem {
+ struct list_head list;
+ struct dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct dp_reo_cmd {
+ struct list_head list;
+ struct dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status status);
+};
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
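+/* advance a tx completion ring index with wrap-around */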
+#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct idr txbuf_idr;
+ /* Protects txbuf_idr and num_pending */
+ spinlock_t tx_idr_lock;
+ struct hal_wbm_release_ring *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+struct ath11k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath11k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct ath11k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+
+ struct ath11k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+};
+
+struct ath11k_pdev_dp {
+ u32 mac_id;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_srng reo_dst_ring;
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rxdma_err_dst_ring;
+ struct dp_srng rxdma_mon_dst_ring;
+ struct dp_srng rxdma_mon_desc_ring;
+
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring;
+ struct ieee80211_rx_status rx_status;
+ struct ath11k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 0 /* Disable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 3
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 8192
+#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 4
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 256
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(20, 18)
+
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
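+
+/* Editor's illustration, not part of this patch: composing a tx
+ * descriptor cookie from the fields above with FIELD_PREP() from
+ * <linux/bitfield.h>; FIELD_GET() with the same masks recovers the
+ * fields on completion. The helper name is hypothetical.
+ */
+static inline u32 example_tx_desc_id(u8 mac_id, u32 msdu_id, u8 pool_id)
+{
+ return FIELD_PREP(DP_TX_DESC_ID_MAC_ID, mac_id) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+}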
+
+struct ath11k_dp {
+ struct ath11k_base *ab;
+ enum ath11k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ spinlock_t reo_cmd_lock;
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
+
+struct htt_tx_wbm_completion {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ u32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target has booted, the host can send an SRING setup message
+ * for each host-facing LMAC SRING. The target sets up HW registers based
+ * on the setup message and confirms back to the host if response_required
+ * is set. The host should wait for the confirmation message before
+ * sending a new SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identify which ring is to setup,
+ * more details can be got from enum htt_srng_ring_id
+ * b'24:31 - ring_type: identify type of host rings,
+ * more details can be got from enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for detail description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold watermark that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ u32 info0;
+ u32 ring_base_addr_lo;
+ u32 ring_base_addr_hi;
+ u32 info1;
+ u32 ring_head_off32_remote_addr_lo;
+ u32 ring_head_off32_remote_addr_hi;
+ u32 ring_tail_off32_remote_addr_lo;
+ u32 ring_tail_off32_remote_addr_hi;
+ u32 ring_msi_addr_lo;
+ u32 ring_msi_addr_hi;
+ u32 msi_data;
+ u32 intr_info;
+ u32 info2;
+} __packed;
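+
+/* Editor's illustration, not part of this patch: packing dword0 of the
+ * SRING setup message exactly as the layout above describes. The helper
+ * name and the chosen ring/type values are hypothetical.
+ */
+static inline u32 example_srng_setup_info0(u8 pdev_id)
+{
+ return FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP) |
+ FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, pdev_id) |
+ FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID,
+ HTT_RXDMA_HOST_BUF_RING) |
+ FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ HTT_SW_TO_HW_RING);
+}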
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is an overloaded field; refer to the usage and
+ * interpretation of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ u32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
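+
+/* Editor's illustration, not part of this patch: the PPDU_STATS_CFG
+ * message is a single dword built from the masks above. The helper name
+ * is hypothetical; HTT_PPDU_STATS_TAG_DEFAULT below is one plausible
+ * TLV bitmap to pass in.
+ */
+static inline u32 example_ppdu_stats_cfg_msg(u8 pdev_mask, u16 tlv_bitmap)
+{
+ return FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) |
+ FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask) |
+ FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, tlv_bitmap);
+}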
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is done per ring and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id : Identify the ring to configure.
+ * More details can be got from enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ u32 info0;
+ u32 info1;
+ u32 pkt_type_en_flags0;
+ u32 pkt_type_en_flags1;
+ u32 pkt_type_en_flags2;
+ u32 pkt_type_en_flags3;
+ u32 rx_filter_tlv;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+};
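+
+/* Editor's illustration, not part of this patch: a monitor-mode filter
+ * assembled from the bundles defined above; each field is later copied
+ * into the corresponding dword of htt_rx_ring_selection_cfg_cmd. The
+ * helper name is hypothetical.
+ */
+static inline void example_mon_tlv_filter(struct htt_rx_ring_tlv_filter *f)
+{
+ f->rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ f->pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ f->pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ f->pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ f->pkt_filter_flags3 = HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+}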
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ u32 version;
+} __packed;
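+
+/* Editor's illustration, not part of this patch: decoding the target's
+ * HTT version word with FIELD_GET() from <linux/bitfield.h>. The helper
+ * name is hypothetical.
+ */
+static inline void example_parse_version(struct htt_t2h_version_conf_msg *msg,
+ u8 *major, u8 *minor)
+{
+ *major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, msg->version);
+ *minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, msg->version);
+}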
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+ u32 info2;
+} __packed;
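+
+/* Editor's illustration, not part of this patch: recovering vdev id,
+ * peer id and the peer MAC address from a peer map event. The MAC is
+ * carried in mac_addr_l32 (bytes 0-3) plus the low half of info1
+ * (bytes 4-5); byte order shown as on a little-endian host. The helper
+ * name is hypothetical.
+ */
+static inline void example_parse_peer_map(struct htt_t2h_peer_map_event *ev,
+ u8 *vdev_id, u16 *peer_id, u8 mac[6])
+{
+ u16 h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, ev->info1);
+ int i;
+
+ *vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, ev->info);
+ *peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, ev->info);
+ for (i = 0; i < 4; i++)
+ mac[i] = (ev->mac_addr_l32 >> (8 * i)) & 0xff;
+ mac[4] = h16 & 0xff;
+ mac[5] = h16 >> 8;
+}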
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath11k_htt_ppdu_stats_msg {
+ u32 info;
+ u32 ppdu_id;
+ u32 timestamp;
+ u32 rsvd;
+ u8 data[0];
+} __packed;
+
+struct htt_tlv {
+ u32 header;
+ u8 value[0];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
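+
+/* Editor's illustration, not part of this patch: walking the TLV stream
+ * that follows the ppdu stats indication header. The 12-bit length in
+ * each TLV header excludes the header itself; the helper name is
+ * hypothetical.
+ */
+static inline void example_for_each_tlv(const u8 *data, u32 len)
+{
+ const struct htt_tlv *tlv;
+ u32 tag, tlv_len;
+
+ while (len >= sizeof(*tlv)) {
+ tlv = (const struct htt_tlv *)data;
+ tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ if (sizeof(*tlv) + tlv_len > len)
+ break; /* truncated stream */
+ (void)tag; /* dispatch on tag here */
+ data += sizeof(*tlv) + tlv_len;
+ len -= sizeof(*tlv) + tlv_len;
+ }
+}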
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ u32 ppdu_id;
+ u16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ u32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/
+ u32 chain_mask;
+ u32 fes_duration_us; /* frame exchange sequence */
+ u32 ppdu_sch_eval_start_tstmp_us;
+ u32 ppdu_sch_end_tstmp_us;
+ u32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ u16 phy_mode;
+ u16 bw_mhz;
+} __packed;
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ u16 sw_peer_id;
+ u32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ u16 ru_end;
+ u16 ru_start;
+ u16 resp_ru_end;
+ u16 resp_ru_start;
+ u32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ u32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ u32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
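+
+/* Editor's illustration, not part of this patch: pulling MCS, NSS and
+ * guard interval out of a user rate TLV with the accessors above. The
+ * helper name is hypothetical.
+ */
+static inline void example_parse_user_rate(struct htt_ppdu_stats_user_rate *rate,
+ u8 *mcs, u8 *nss, u8 *gi)
+{
+ *mcs = HTT_USR_RATE_MCS(rate->rate_flags);
+ *nss = HTT_USR_RATE_NSS(rate->rate_flags);
+ *gi = HTT_USR_RATE_GI(rate->rate_flags);
+}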
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ u32 tx_success_bytes;
+ u32 tx_retry_bytes;
+ u32 tx_failed_bytes;
+ u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ u16 tx_success_msdus;
+ u16 tx_retry_msdus;
+ u16 tx_failed_msdus;
+ u16 tx_duration; /* unit: us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ u16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+ u16 mpdu_tried;
+ u16 mpdu_success;
+ u32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ u32 ppdu_id;
+ u16 sw_peer_id;
+ u16 reserved0;
+ u32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ u16 current_seq;
+ u16 start_seq;
+ u32 success_bytes;
+} __packed;
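+
+/* A minimal sketch, mirroring the consumers later in this series: unpacking
+ * the ack/BA info word with the masks above. The helper is illustrative
+ * only.
+ */
+static inline void htt_ack_ba_info_decode_example(u32 info)
+{
+ u16 num_msdu = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, info);
+ u8 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, info);
+
+ pr_debug("ack_ba: msdus %u tid %u\n", num_msdu, tid);
+}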
+
+struct htt_ppdu_stats_usr_cmn_array {
+ struct htt_tlv tlv_hdr;
+ u32 num_ppdu_stats;
+ /* tx_ppdu_info is a variable-length array of
+ * struct htt_tx_ppdu_stats_info elements; its total length is
+ * num_ppdu_stats * sizeof(struct htt_tx_ppdu_stats_info).
+ */
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 ppdu_id;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/**
+ * @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit values.
+ *
+ * |31 16|15 12|11 10|9 8|7 0|
+ * |------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id| msg type |
+ * |------------------------------------------------------------------|
+ * | payload |
+ * |------------------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a pktlog message
+ * Value: HTT_T2H_MSG_TYPE_PKTLOG
+ * - mac_id
+ * Bits 9:8
+ * Purpose: identifies which MAC/PHY instance generated this pktlog info
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: explicitly specify the payload size
+ * Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+ u32 hdr;
+ u8 payload[0];
+};
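+
+/* A hedged sketch of decoding the pktlog header word per the layout above;
+ * payload_size and pdev_id reuse the HTT_T2H_PPDU_STATS_INFO_* masks, as the
+ * rx path in this series does, while the inline GENMASK(9, 8) for mac_id is
+ * an assumption taken from the bit diagram.
+ */
+static inline void htt_pktlog_hdr_decode_example(const struct htt_pktlog_msg *msg)
+{
+ u16 payload_size = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->hdr);
+ u8 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->hdr);
+ u8 mac_id = FIELD_GET(GENMASK(9, 8), msg->hdr);
+
+ pr_debug("pktlog: mac %u pdev %u payload %u bytes\n",
+ mac_id, pdev_id, payload_size);
+}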
+
+/**
+ * @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is an overloaded field, refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ u32 cfg_param0;
+ u32 cfg_param1;
+ u32 cfg_param2;
+ u32 cfg_param3;
+ u32 reserved;
+ u32 cookie_lsb;
+ u32 cookie_msb;
+} __packed;
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
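+
+/* A minimal sketch of filling the request per the message layout documented
+ * above. Msg type 0x10 comes from that description (the HTT send path would
+ * normally set it); pdev_mask and stats_type are caller-chosen, and the
+ * helper itself is hypothetical.
+ */
+static inline void htt_ext_stats_cfg_fill_example(struct htt_ext_stats_cfg_cmd *cmd,
+ u8 pdev_mask, u8 stats_type,
+ u64 cookie)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = 0x10; /* ext stats request, per the doc above */
+ cmd->hdr.pdev_mask = pdev_mask;
+ cmd->hdr.stats_type = stats_type;
+ cmd->cfg_param0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+}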
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit 0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit 15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit 31 : Bit 16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit 31 : Bit 0] mac_addr31to0
+ * @config_param3: [Bit 15 : Bit 0] mac_addr47to32
+ * [Bit 31 : Bit 16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to pass different configs to the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
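+
+/* Sketch of composing PEER_INFO config params per the layout above: request
+ * peer stats by mac address with all seven TLV types. The byte packing of
+ * the address follows the mac_addr31to0/mac_addr47to32 split documented for
+ * config_param2/3; the helper is illustrative only.
+ */
+static inline void
+htt_ext_stats_peer_info_params_example(struct htt_ext_stats_cfg_params *p,
+ const u8 *mac)
+{
+ p->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ p->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+ p->cfg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | ((u32)mac[3] << 24);
+ p->cfg3 = mac[4] | (mac[5] << 8);
+}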
+
+/**
+ * @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_ext_stats_status
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath11k_htt_extd_stats_msg {
+ u32 info0;
+ u64 cookie;
+ u32 info1;
+ u8 data[0];
+} __packed;
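+
+/* Sketch of inspecting one stats conf segment; only the LENGTH mask is
+ * defined in this patch, so the DONE bit below is an assumed mask derived
+ * from bit 11 of the layout documented above.
+ */
+#define HTT_T2H_EXT_STATS_INFO1_DONE_ASSUMED BIT(11)
+
+static inline bool
+htt_ext_stats_segment_done_example(const struct ath11k_htt_extd_stats_msg *msg)
+{
+ u16 len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+
+ pr_debug("ext stats segment: %u bytes\n", len);
+ return !!(msg->info1 & HTT_T2H_EXT_STATS_INFO1_DONE_ASSUMED);
+}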
+
+struct htt_mac_addr {
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+};
+
+static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ addr_l32 = swab32(addr_l32);
+ addr_h16 = swab16(addr_h16);
+ }
+
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
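+
+/* Usage sketch: recovering an ethernet address from the packed words of
+ * struct htt_mac_addr, as the peer map handler later in this series does
+ * for HTT peer map events. The wrapper name is hypothetical.
+ */
+static inline void htt_mac_addr_decode_example(const struct htt_mac_addr *addr,
+ u8 dest[ETH_ALEN])
+{
+ ath11k_dp_get_mac_addr(addr->mac_addr_l32, addr->mac_addr_h16, dest);
+}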
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget);
+int ath11k_dp_htt_connect(struct ath11k_dp *dp);
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif);
+void ath11k_dp_free(struct ath11k_base *ab);
+int ath11k_dp_alloc(struct ath11k_base *ab);
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_free(struct ath11k_base *ab);
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring);
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
new file mode 100644
index 000000000000..b08da839b7d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -0,0 +1,4195 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include "core.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
+{
+ return desc->hdr_status;
+}
+
+static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
+{
+ if (!(__le32_to_cpu(desc->mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info2));
+}
+
+static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info5));
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(desc->attention.info2));
+}
+
+static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+ __le32_to_cpu(desc->attention.info2)) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->attention.info1);
+ u32 errmap = 0;
+
+ if (info & RX_ATTENTION_INFO1_FCS_ERR)
+ errmap |= DP_RX_MPDU_ERR_FCS;
+
+ if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
+ errmap |= DP_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
+ errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
+ errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
+ errmap |= DP_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->msdu_start.info1));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
+{
+ u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->msdu_start.info3));
+
+ return hweight8(mimo_ss_bitmap);
+}
+
+static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
+ sizeof(struct rx_msdu_end));
+ memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
+{
+ struct rx_attention *rx_attn;
+
+ rx_attn = &rx_desc->attention;
+
+ return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
+ __le32_to_cpu(rx_attn->info1));
+}
+
+static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
+{
+ struct rx_msdu_start *rx_msdu_start;
+
+ rx_msdu_start = &rx_desc->msdu_start;
+
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(rx_msdu_start->info2));
+}
+
+static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
+{
+ u8 *rx_pkt_hdr;
+
+ rx_pkt_hdr = &rx_desc->msdu_payload[0];
+
+ return rx_pkt_hdr;
+}
+
+static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(rx_desc->mpdu_start_tag));
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
+static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
+{
+ return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
+}
+
+/* Returns number of Rx buffers replenished */
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_idr_remove;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_idr_remove:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where the internal driver does this dma_unmap
+ * of the rxdma_buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* XXX: Understand where the internal driver does this dma_unmap
+ * of the rxdma_buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath11k_hal_srng_get_entrysize(ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+
+ return 0;
+}
+
+static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_pdev_dp *dp;
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
+ }
+}
+
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
+ dp->mac_id, dp->mac_id,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *srng = NULL;
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0,
+ dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
+ HAL_RXDMA_DST, 0, dp->mac_id,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
+ return ret;
+ }
+
+ srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring\n");
+ return ret;
+ }
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+ HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+ HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DESC_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *cmd, *tmp;
+ struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath11k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath11k_dp_reo_cmd_free);
+ if (ret) {
+ ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+}
+
+static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath11k_base *ab = dp->ab;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (time_after(jiffies, elem->ts +
+ msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath11k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath11k_dp_rx_tid_del_func);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+
+ rx_tid->active = false;
+}
+
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ int i;
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ ath11k_peer_rx_tid_delete(ar, peer, i);
+}
+
+static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ const u8 *peer_mac, int vdev_id, u8 tid)
+{
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
+ goto unlock_exit;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ goto unlock_exit;
+
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+
+ rx_tid->active = false;
+
+unlock_exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ u32 *addr_aligned;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ peer_mac, paddr,
+ tid, 1, ba_win_sz);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
+ tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on the
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
+ tid, ret);
+ ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(vaddr);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn);
+ if (ret)
+ ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ dma_addr_t paddr;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ paddr = peer->rx_tid[params->tid].paddr;
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ params->sta->addr, paddr,
+ params->tid, 1, 1);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = (struct htt_ppdu_stats_info *)data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id =
+ ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
+{
+ u32 bwflags = 0;
+
+ switch (bw) {
+ case ATH11K_BW_40:
+ bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_80:
+ bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_160:
+ bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+
+ return bwflags;
+}
+
+static void
+ath11k_update_per_peer_tx_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ieee80211_chanctx_conf *conf = NULL;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
+ u32 succ_bytes = 0;
+ u16 rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = usr_stats->ack_ba.success_bytes;
+ succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
+ usr_stats->ack_ba.info);
+ tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
+ usr_stats->ack_ba.info);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = common->fes_duration_us;
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+
+ /* Note: If the host configured fixed rates, or in some other special
+ * cases, broadcast/management frames are sent at different rates.
+ * Should firmware rate control be skipped for these frames?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
+ ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ if (arsta->arvif && arsta->arvif->vif)
+ conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ arsta->tx_info.status.rates[0].idx = rate_idx;
+ if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
+ mcs <= ATH11K_HW_RATE_CCK_SP_2M)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ if (succ_pkts) {
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
+ arsta->tx_info.status.rates[0].count = 1;
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
+ }
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ ath11k_accumulate_per_peer_tx_stats(arsta,
+ peer_stats, rate_idx);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ spin_lock_bh(&ar->data_lock);
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ppdu_info;
+ }
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
+ if (!ppdu_info)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ppdu_info;
+}
+
+static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath11k *ar;
+ int ret;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ ppdu_id = msg->ppdu_id;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
+ trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+
+ ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath11k *ar;
+ u32 len;
+ u8 pdev_id;
+
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
+ if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
+ ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
+ len,
+ ATH11K_HTT_PKTLOG_MAX_SIZE);
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+ return;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, len);
+}
+
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+ resp->version_msg.version);
+ dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+ resp->version_msg.version);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+ resp->peer_map_ev.info2);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+ resp->peer_unmap_ev.info);
+ ath11k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath11k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath11k_dbg_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG:
+ ath11k_htt_pktlog(ab, skb);
+ break;
+ default:
+ ath11k_warn(ab, "htt event %d not handled\n", type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra;
+ int rem_len;
+ int buf_len;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU is spread over multiple buffers, the attention, MSDU_END
+ * and MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
+ */
+ ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
+ skb_pull(skb, HAL_RX_DESC_SIZE);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff_head *amsdu_list)
+{
+ struct sk_buff *msdu = skb_peek(msdu_list);
+ struct sk_buff *last_buf;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ u16 msdu_len;
+ u8 l3_pad_bytes;
+ u8 *hdr_status;
+ int ret;
+
+ if (!msdu)
+ return -ENOENT;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ hdr = (struct ieee80211_hdr *)hdr_status;
+ /* Process only data frames */
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ __skb_unlink(msdu, msdu_list);
+ dev_kfree_skb_any(msdu);
+ return -EINVAL;
+ }
+
+ do {
+ __skb_unlink(msdu, msdu_list);
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ar->ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
+ ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+
+ if (!rxcb->is_continuation) {
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+ __skb_queue_tail(amsdu_list, msdu);
+
+ /* Should we also consider msdu_cnt from mpdu_meta while
+ * preparing amsdu list?
+ */
+ if (rxcb->is_last_msdu)
+ break;
+ } while ((msdu = skb_peek(msdu_list)) != NULL);
+
+ return 0;
+
+free_out:
+ dev_kfree_skb_any(msdu);
+ __skb_queue_purge(amsdu_list);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool ip_csum_fail, l4_csum_fail;
+
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
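+/* Undecap a native wifi frame: pull the decapped header, then rebuild the
+ * original 802.11 header (and the crypto params when the IV was not
+ * stripped) in front of the payload, preserving the decapped frame's
+ * DA and SA.
+ */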
+static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
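+ /* The TKIP Michael MIC is 8 bytes, which happens to equal
+ * IEEE80211_CCMP_MIC_LEN, so that length is reused here.
+ */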
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
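+/* Locate the rfc 1042 (LLC/SNAP) header inside the 802.11 header copy kept
+ * in the hw rx descriptor: skip the 802.11 header and crypto params on the
+ * first msdu, plus the A-MSDU subframe header when part of an A-MSDU.
+ */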
+static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_amsdu;
+
+ is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
+ rfc1042 = hdr;
+
+ if (rxcb->is_first_msdu) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += hdr_len + crypto_len;
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ void *rfc1042;
+
+ rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
+ sizeof(struct ath11k_dp_rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ u8 *first_hdr;
+ u8 decap;
+
+ first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
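+/* Fill in per-MPDU rx status for every msdu in amsdu_list: strip the A-MSDU
+ * bit from QoS Ctl, derive error and decryption flags from the rx
+ * descriptors, then run checksum offload and undecap on each msdu.
+ */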
+static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ enum hal_encrypt_type enctype;
+ struct sk_buff *last_msdu;
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *last_rxcb;
+ bool is_decrypted;
+ u32 err_bitmap;
+ u8 *qos;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last_msdu = skb_peek_tail(amsdu_list);
+ last_rxcb = ATH11K_SKB_RXCB(last_msdu);
+
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
+
+ skb_queue_walk(amsdu_list, msdu) {
+ ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+ }
+}
+
+static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck;
+
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH11K_HT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
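+/* Fill in per-PPDU rx status: band and frequency from the channel number in
+ * the rx descriptor, then rate/MCS/NSS/bandwidth via ath11k_dp_rx_h_rate().
+ */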
+static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 channel_num;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
+ if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
+ channel_num);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *first;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ bool first_mpdu;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ first = skb_peek(amsdu_list);
+ rxcb = ATH11K_SKB_RXCB(first);
+ rx_desc = rxcb->rx_desc;
+
+ first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
+ if (first_mpdu)
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+
+ ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
+}
+
+static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
+ size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
+static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_radiotap_he *he = NULL;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ if (status->encoding == RX_ENC_HE) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ ieee80211_get_SA(hdr),
+ ath11k_print_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ /* TODO: trace rx packet */
+
+ ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+}
+
+static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+ struct ieee80211_rx_status *status;
+
+ first_subframe = skb_peek(amsdu_list);
+
+ skb_queue_walk(amsdu_list, msdu) {
+ /* Setup per-MSDU flags: only the final subframe must have
+ * RX_FLAG_AMSDU_MORE cleared. Note that skb_queue_empty() can
+ * never be true while walking a non-empty list, so test for the
+ * last entry instead.
+ */
+ if (skb_queue_is_last(amsdu_list, msdu))
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_SKIP_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = *rxs;
+ }
+}
+
+static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *pending_q,
+ int *quota, u8 mac_id)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_pdev *pdev;
+
+ if (skb_queue_empty(pending_q))
+ return;
+
+ ar = ab->pdevs[mac_id].ar;
+
+ rcu_read_lock();
+ pdev = rcu_dereference(ab->pdevs_active[mac_id]);
+
+ while (*quota && (msdu = __skb_dequeue(pending_q))) {
+ if (!pdev) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ (*quota)--;
+ }
+ rcu_read_unlock();
+}
+
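+/* Napi poll handler for a REO destination ring: first deliver packets left
+ * over from the previous poll, then reap new ring entries into amsdu lists,
+ * replenish the rx buffers, and deliver msdus up to the remaining quota,
+ * queueing any excess onto pending_q for the next poll.
+ */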
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ieee80211_rx_status *rx_status = &dp->rx_status;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list;
+ struct sk_buff_head amsdu_list;
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id;
+ int num_buffs_reaped = 0;
+ int quota = budget;
+ int ret;
+ bool done = false;
+
+ /* Process any pending packets from the previous napi poll.
+ * Note: All msdus in this pending_q correspond to the same mac id,
+ * both because of the pdev based reo dest mapping and because each
+ * irq group id maps to a specific reo dest ring.
+ */
+ ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
+ mac_id);
+
+ /* If all of the quota was exhausted by processing the pending_q,
+ * wait for the next napi poll to reap the new info.
+ */
+ if (!quota)
+ goto exit;
+
+ __skb_queue_head_init(&msdu_list);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+try_again:
+ while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
+ cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ desc->buf_addr_info.info1);
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ cookie);
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ /* TODO: Check if the msdu can be sent up for processing */
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list, msdu);
+
+ /* Stop reaping from the ring once quota is exhausted
+ * and we have received all msdus in the AMSDU. Any
+ * additional msdus reaped in excess of the quota here are
+ * pushed into the pending queue to be processed during
+ * the next napi poll.
+ * Note: More profiling could be done to see the impact on
+ * pending_q and throughput under various traffic loads and
+ * densities, and how using budget instead of the remaining
+ * quota affects it.
+ */
+ if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
+ !rxcb->is_continuation) {
+ done = true;
+ break;
+ }
+ }
+
+ /* Hw might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring, we'll
+ * get rx_desc NULL. Give the read another try with the updated
+ * cached head pointer so that we can reap a complete MPDU in the
+ * current rx processing.
+ */
+ if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ goto exit;
+
+ /* Should we reschedule it later if we are not able to replenish all
+ * the buffers?
+ */
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ while (!skb_queue_empty(&msdu_list)) {
+ __skb_queue_head_init(&amsdu_list);
+ ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
+ if (ret) {
+ if (ret == -EIO) {
+ ath11k_err(ab, "rx ring got corrupted %d\n", ret);
+ __skb_queue_purge(&msdu_list);
+ /* Should we stop processing any more rx from
+ * this ring in the future?
+ */
+ goto rcu_unlock;
+ }
+
+ /* A-MSDU retrieval failed due to a non-fatal condition;
+ * continue processing with the next msdu.
+ */
+ continue;
+ }
+
+ ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
+
+ ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
+ skb_queue_splice_tail(&amsdu_list, pending_q);
+ }
+
+ while (quota && (msdu = __skb_dequeue(pending_q))) {
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ quota--;
+ }
+
+rcu_unlock:
+ rcu_read_unlock();
+exit:
+ return budget - quota;
+}
+
+static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+
+ if (!rx_stats)
+ return;
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
+ rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
+
+ if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
+ rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX)
+ rx_stats->gi_count[ppdu_info->gi] += num_msdu;
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX)
+ rx_stats->bw_count[ppdu_info->bw] += num_msdu;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+}
+
+static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ int *buf_id, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, gfp);
+ if (!skb)
+ break;
+ paddr = ATH11K_SKB_RXCB(skb)->paddr;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_desc_get;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_desc_get:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
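+/* Reap completed status buffers from the rx monitor status ring into
+ * skb_list, refilling each reaped ring entry with a freshly allocated
+ * buffer.
+ */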
+static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
+ struct hal_srng *srng;
+ void *rx_mon_status_desc;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ u32 cookie;
+ int buf_id;
+ dma_addr_t paddr;
+ u8 rbm;
+ int num_buffs_reaped = 0;
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc =
+ ath11k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc)
+ break;
+
+ ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ continue;
+ }
+
+ __skb_queue_tail(skb_list, skb);
+ }
+
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, GFP_ATOMIC);
+
+ if (!skb) {
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ HAL_RX_BUF_RBM_SW3_BM);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie, HAL_RX_BUF_RBM_SW3_BM);
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct hal_rx_mon_ppdu_info ppdu_info;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
+
+ if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
+
+ if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info.peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
+
+ if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ }
+exit:
+ return num_buffs_reaped;
+}
+
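+/* Return an rx link descriptor to hardware through the WBM release ring,
+ * tagged with the requested buffer manager action.
+ */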
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
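+/* Rx fragments are received in raw mode; derive error flags, band and rate
+ * for the fragment, then strip the FCS and, when decrypted, the trailing
+ * MIC.
+ */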
+static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 rx_channel;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted;
+ u32 err_bitmap;
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
+ if (rx_channel >= 1 && rx_channel <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (rx_channel >= 36 && rx_channel <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
+ rx_channel);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
+ rx_status->band);
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+
+ /* Rx fragments are received in raw mode */
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+ }
+}
+
+static int
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
+ int buf_id, bool frag)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct ieee80211_rx_status rx_status = {0};
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_rx_status *status;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return -EINVAL;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!frag) {
+ /* Process only rx fragments below, and drop
+ * msdus that were indicated due to error reasons.
+ */
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+
+ ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
+
+ status = IEEE80211_SKB_RXCB(msdu);
+
+ *status = rx_status;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ int tot_n_bufs_reaped, quota, ret, i;
+ int n_bufs_reaped[MAX_RADIOS] = {0};
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath11k_dp *dp;
+ void *link_desc_va;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ dma_addr_t paddr;
+ u32 *desc;
+ bool is_frag;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
+
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+ msdu_cookies[i]);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
+ is_frag)) {
+ n_bufs_reaped[mac_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!n_bufs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
+static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath11k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
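+/* Try to recover an msdu that hit the NULL queue descriptor
+ * (DESC_ADDR_ZERO) REO error so the caller can still deliver it to
+ * mac80211; see the comment in the body for when this error can occur.
+ */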
+static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff_head amsdu_list;
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ /* First buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
+ ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
+ ath11k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+ /* Handle NULL queue descriptor violations arising out of a missing
+ * REO queue for a given peer or a given TID. This typically
+ * happens if a packet is received on a QoS enabled TID before the
+ * ADDBA negotiation for that TID completes and the TID queue is set
+ * up. It may also happen for MC/BC frames if they are not routed to
+ * the non-QoS TID queue, in the absence of any other default TID
+ * queue. This error can show up both in a REO destination and in a
+ * WBM release ring.
+ */
+
+ __skb_queue_head_init(&amsdu_list);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+
+ if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ __skb_queue_tail(&amsdu_list, msdu);
+
+ ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
+
+ /* Please note that the caller will have access to the msdu and will
+ * complete rx with mac80211, so there is no need to clean up amsdu_list.
+ */
+
+ return 0;
+}
+
+static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ default:
+ /* TODO: Review other errors and process them to mac80211
+ * as appropriate.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath11k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ default:
+ /* TODO: Review other rxdma error code to check if anything is
+ * worth reporting to mac80211
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status *status;
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+}
+
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id, mac_id;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
+ int total_num_buffs_reaped = 0;
+ int ret, i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
+ buf_id, mac_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+ total_num_buffs_reaped++;
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
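+/* Drain the rxdma error destination ring: account the rxdma error code,
+ * free every msdu referenced by the link descriptor, return the link
+ * descriptor to the WBM idle list, and replenish the freed rx buffers.
+ */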
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
+ struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
+ struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
+ struct hal_srng *srng;
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ enum hal_rx_buf_return_buf_manager rbm;
+ enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ struct hal_reo_entrance_ring *entr_ring;
+ void *desc;
+ int num_buf_freed = 0;
+ int quota = budget;
+ dma_addr_t paddr;
+ u32 desc_bank;
+ void *link_desc_va;
+ int num_msdus;
+ int i;
+ int buf_id;
+
+ srng = &ab->hal.srng_list[err_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (quota-- &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
+
+ entr_ring = (struct hal_reo_entrance_ring *)desc;
+ rxdma_err_code =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ entr_ring->info1);
+ ab->soc_stats.rxdma_error[rxdma_err_code]++;
+
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ num_buf_freed++;
+ }
+
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (num_buf_freed)
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ return budget - quota;
+}
+
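+/* Process the REO status ring: parse each status descriptor and dispatch it
+ * to the handler of the matching command on the reo_cmd_list.
+ */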
+void ath11k_dp_process_reo_status(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ struct dp_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u32 *reo_desc;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath11k_hal_reo_status_queue_stats(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath11k_hal_reo_flush_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath11k_hal_reo_flush_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+
+ ath11k_dp_rx_pdev_srng_free(ar);
+ ath11k_dp_rxdma_pdev_buf_free(ar);
+}
+
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int ret;
+
+ ret = ath11k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_err_dst_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_dst_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_desc_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DESC);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+}
+
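+/* Split total_len into frag_len chunks of at most one rx buffer (minus the
+ * hal_rx_desc) each.
+ */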
+static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
+{
+ if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
+ *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
+ *total_len -= *frag_len;
+ } else {
+ *frag_len = *total_len;
+ *total_len = 0;
+ }
+}
+
+static
+int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
+ void *p_last_buf_addr_info,
+ u8 mac_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *dp_srng;
+ void *hal_srng;
+ void *src_srng_desc;
+ int ret = 0;
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+
+ ath11k_hal_srng_access_begin(ar->ab, hal_srng);
+
+ src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
+
+ if (src_srng_desc) {
+ struct ath11k_buffer_addr *src_desc =
+ (struct ath11k_buffer_addr *)src_srng_desc;
+
+ *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "Monitor Link Desc Ring %d Full", mac_id);
+ ret = -ENOMEM;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, hal_srng);
+ return ret;
+}
+
+static
+void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info)
+{
+ struct hal_rx_msdu_link *msdu_link =
+ (struct hal_rx_msdu_link *)rx_msdu_link_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ u8 rbm = 0;
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
+
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
+
+ *pp_buf_addr_info = (void *)buf_addr_info;
+}
+
+static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+ return 0;
+}
+
+static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
+ void *msdu_link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ struct hal_rx_msdu_link *msdu_link = NULL;
+ int i;
+ u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
+ u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
+ u8 tmp = 0;
+
+ msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
+ msdu_details = &msdu_link->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu_details[i].buf_addr_info.info0) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= last;
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= first;
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= last;
+ msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu_details[i].buf_addr_info.info1);
+ tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu_details[i].buf_addr_info.info1);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
+static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
+ u32 *rx_bufs_used)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* mon_dst is behind mon_status,
+ * so skip this dst_ring entry and free it
+ */
+ *rx_bufs_used += 1;
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
+static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ if (!*is_frag) {
+ *total_len = info->msdu_len;
+ *is_frag = true;
+ }
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ if (*is_frag) {
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ *frag_len = info->msdu_len;
+ }
+ *is_frag = false;
+ *msdu_cnt -= 1;
+ }
+}
+
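+/* Pop one MPDU destined for the monitor path: walk the msdu link
+ * descriptors starting at ring_entry, chain the msdus into a
+ * head_msdu/tail_msdu list, return consumed link descriptors to hardware,
+ * and report the number of rx buffers used.
+ */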
+static u32
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_rx_msdu_list msdu_list;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ struct hal_rx_desc *rx_desc;
+ void *rx_msdu_link_desc;
+ dma_addr_t paddr;
+ u16 num_msdus = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ u32 rx_bufs_used = 0, i = 0;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ int buf_id;
+
+ ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie, &p_last_buf_addr_info,
+ &msdu_cnt);
+
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ ent_desc->info1) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ ent_desc->info1);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ return rx_bufs_used;
+ }
+
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+					   "i %d last_cookie %d is the same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "msdu_pop: invalid buf_id %d\n", buf_id);
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+
+ msdu_ppdu_id =
+ ath11k_dp_rxdesc_get_ppduid(rx_desc);
+
+ if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id,
+ &rx_bufs_used)) {
+ if (rx_bufs_used) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
+ &sw_cookie,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
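+/* Strip the hal_rx_desc and L3 padding from every MSDU in the list. For
+ * raw decap only the FCS needs trimming from the last buffer; for native
+ * wifi decap the 802.11 (QoS) header is restored in front of each MSDU.
+ */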
+static struct sk_buff *
+ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct sk_buff *last_msdu,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ u32 decap_format, wifi_hdr_len;
+ struct hal_rx_desc *rx_desc;
+ char *hdr_desc;
+ u8 *dest;
+ struct ieee80211_hdr_3addr *wh;
+
+ mpdu_buf = NULL;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
+ return NULL;
+
+ decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dp_rx_msdus_set_payload(head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ __le16 qos_field;
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ /* Base size */
+ wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control)) {
+ struct ieee80211_qos_hdr *qwh =
+ (struct ieee80211_qos_hdr *)hdr_desc;
+
+ qos_field = qwh->qos_ctrl;
+ qos_pkt = 1;
+ }
+ msdu = head_msdu;
+
+ while (msdu) {
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, wifi_hdr_len);
+ memcpy(dest + wifi_hdr_len,
+ (u8 *)&qos_field, sizeof(__le16));
+ }
+ ath11k_dp_rx_msdus_set_payload(msdu);
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+			   "prev_buf %pK prev_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ /* Free the head buffer */
+ dev_kfree_skb_any(mpdu_buf);
+ }
+ return NULL;
+}
+
+static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct sk_buff *tail_msdu,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+
+ mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
+ tail_msdu, rxs);
+
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+
+ rxs->flag = 0;
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(mon_skb);
+ *status = *rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
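+/* Reap the monitor destination ring for the PPDU currently tracked by the
+ * status path, deliver each complete MPDU to mac80211 and replenish the
+ * consumed rx buffers. A ppdu_id mismatch means the status path must
+ * resynchronize, so processing stops there.
+ */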
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ void *ring_entry;
+ void *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ u32 npackets = 0;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+
+ if (!mon_dst_srng) {
+ ath11k_warn(ar->ab,
+ "HAL Monitor Destination Ring Init Failed -- %pK",
+ mon_dst_srng);
+ return;
+ }
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_bufs_used = 0;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ break;
+ }
+ if (head_msdu && tail_msdu) {
+ ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ }
+
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+}
+
+static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
+ u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info;
+ struct sk_buff *status_skb;
+ u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
+ return;
+
+ while (!skb_queue_empty(&pmon->rx_status_q)) {
+ status_skb = skb_dequeue(&pmon->rx_status_q);
+
+ tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
+ status_skb);
+ if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath11k_dp_rx_mon_dest_process(ar, quota, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+ dev_kfree_skb_any(status_skb);
+ }
+}
+
+static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ int num_buffs_reaped = 0;
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
+ &pmon->rx_status_q);
+ if (num_buffs_reaped)
+ ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ int ret = 0;
+
+ if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
+ else
+ ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_srng *mon_desc_srng = NULL;
+ struct dp_srng *dp_srng;
+ int ret = 0;
+ u32 n_link_desc = 0;
+
+ ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ n_link_desc = dp_srng->size /
+ ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
+ mon_desc_srng =
+ &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
+ n_link_desc);
+ if (ret) {
+ ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
+ return ret;
+ }
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+ return 0;
+}
+
+static int ath11k_dp_mon_link_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+
+ ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC,
+ &dp->rxdma_mon_desc_ring);
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
+{
+ ath11k_dp_mon_link_free(ar);
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
new file mode 100644
index 000000000000..eec5deaa59ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_DP_RX_H
+#define ATH11K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_RX_MPDU_ERR_FCS BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+enum dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath11k_dp_amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+struct ath11k_dp_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn);
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
+void ath11k_dp_process_reo_status(struct ath11k_base *ab);
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget);
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+
+#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
new file mode 100644
index 000000000000..6d7d33761caf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "hw.h"
+
+/* NOTE: None of the mapped ring id values may exceed DP_TCL_NUM_RING_MAX */
+static const u8
+ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
+
+static enum hal_tcl_encap_type
+ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
+{
+ /* TODO: Determine encap type based on vif_type and configuration */
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
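+/* Convert an 802.11 QoS data frame to the native wifi tx format by
+ * stripping the QoS control field and clearing the QOS_DATA subtype bit.
+ */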
+static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+	/* TODO: Determine the encap type based on vif_type and configuration */
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ void *hal_tcl_desc;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
+ ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
+ DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (ret < 0)
+ return -ENOSPC;
+
+ ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
+ ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (info->control.hw_key)
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
+ else
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+ }
+
+ if (ieee80211_vif_is_mesh(arvif->vif))
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
+
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+ ti.tid = ath11k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ /* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
+ * is not applicable, hence manual checksum calculation using
+ * skb_checksum_help() is needed
+ */
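+		/* A minimal sketch of that fallback, assuming raw mode were
+		 * wired up (illustrative only, not part of this change):
+		 *
+		 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 *	    skb_checksum_help(skb)) {
+		 *		ret = -EINVAL;
+		 *		goto fail_remove_idr;
+		 *	}
+		 */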
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, ti.paddr)) {
+ ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_idr;
+ }
+
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+		/* NOTE: It is highly unlikely we'll run out of tcl_ring
+		 * descriptors because they are enqueued directly onto the
+		 * hw queue. Add tx packet throttling logic in the future
+		 * if required.
+		 */
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
+
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_idr:
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_remove(&tx_ring->txbuf_idr,
+ FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
+ int msdu_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ struct dp_tx_ring *tx_ring,
+ struct ath11k_dp_htt_wbm_tx_status *ts)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k *ar;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
+ ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ ieee80211_tx_status(ar->hw, msdu);
+}
+
+static void
+ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ void *desc, u8 mac_id,
+ u32 msdu_id, struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+ status_desc->info0);
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.msdu_id = msdu_id;
+ ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ status_desc->info1);
+ ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+ if (ts->try_cnt > 1) {
+ peer_stats->retry_pkts += ts->try_cnt - 1;
+ peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+ if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+ peer_stats->failed_pkts += 1;
+ peer_stats->failed_bytes += msdu->len;
+ }
+ }
+}
+
+static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+	/* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+ if (ar->last_ppdu_id == 0) {
+ ar->last_ppdu_id = ts->ppdu_id;
+ } else if (ar->last_ppdu_id == ts->ppdu_id ||
+ ar->cached_ppdu_id == ar->last_ppdu_id) {
+ ar->cached_ppdu_id = ar->last_ppdu_id;
+ ar->cached_stats.is_ampdu = true;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ } else {
+ ar->cached_stats.is_ampdu = false;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ }
+ ar->last_ppdu_id = ts->ppdu_id;
+ }
+
+ ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
+ }
+
+ /* NOTE: Tx rate status reporting. Tx completion status does not have
+ * necessary information (for example nss) to build the tx rate.
+ * Might end up reporting it out-of-band from HTT stats.
+ */
+
+ ieee80211_tx_status(ar->hw, msdu);
+
+exit:
+ rcu_read_unlock();
+}
+
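+/* Unpack the WBM release descriptor into hal_tx_status. Descriptors
+ * released by the firmware carry HTT metadata instead and are parsed
+ * separately in ath11k_dp_tx_process_htt_tx_complete().
+ */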
+static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ return;
+
+ ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ desc->info0);
+ ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+ desc->info1);
+ ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+ desc->info1);
+ ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+ desc->info2);
+ if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+ ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+ ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+ ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+ if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = desc->rate_stats.info0;
+ else
+ ts->rate_stats = 0;
+}
+
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ u32 *desc;
+ u32 msdu_id;
+ u8 mac_id;
+
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
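+	/* Stage completions from the hardware status ring into the
+	 * driver-local tx_status circular buffer first so the srng access
+	 * window stays short; the staged entries are processed after
+	 * ath11k_hal_srng_access_end() below.
+	 */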
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ tx_status->buf_addr_info.info1);
+ mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+ msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath11k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu_id,
+ tx_ring);
+ continue;
+ }
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ continue;
+ }
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status))
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+	/* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num <= 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+	/* Can this be optimized so that we keep the pending command list only
+	 * for the tid delete command, freeing up the resource on the command
+	 * status indication?
+	 */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
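+/* Map a HAL ring type onto the HTT ring type/id pair carried in the
+ * SRING_SETUP message. For HAL_RXDMA_BUF the ring_id is additionally
+ * validated against the per-LMAC buffer ring ids derived from mac_id.
+ */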
+static int
+ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int lmac_ring_id_offset = 0;
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+		ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ htt_ring_type);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+ cmd->ring_base_addr_lo = params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK;
+
+ cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ ret = ath11k_hal_srng_get_entrysize(ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
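+	/* ath11k_hal_srng_get_entrysize() returns bytes while HTT expects
+	 * the entry size in 32-bit words, hence the conversion below.
+	 */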
+ ring_entry_sz >>= 2;
+ cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+ ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+ params.num_entries * ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+ cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_msi_addr_lo = 0;
+ cmd->ring_msi_addr_hi = 0;
+ cmd->msi_data = 0;
+
+ cmd->intr_info = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+ params.intr_batch_cntr_thres_entries * ring_entry_sz);
+ cmd->intr_info |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+ params.intr_timer_thres_us >> 3);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+ params.low_threshold);
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+ HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+		ath11k_err(ab, "unsupported htt major version %d, supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (ar->pdev_idx);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+ htt_ring_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+ cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+ rx_buf_size);
+ cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+ cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+ cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+ cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+ cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cfg_params->cfg0;
+ cmd->cfg_param1 = cfg_params->cfg1;
+ cmd->cfg_param2 = cfg_params->cfg2;
+ cmd->cfg_param3 = cfg_params->cfg3;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath11k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
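+/* Program the rxdma monitor buffer and status ring filters: the non-reset
+ * case enables all FP/MO management, control and data filters, while
+ * reset clears the buffer ring filter and restores the default status
+ * ring filter.
+ */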
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret = 0, ring_id = 0;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret)
+ return ret;
+
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ if (!reset)
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ else
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
new file mode 100644
index 000000000000..f8a9f9c8e444
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_TX_H
+#define ATH11K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath11k_dp_htt_wbm_tx_status {
+ u32 msdu_id;
+ bool acked;
+ int ack_rssi;
+};
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb);
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*func)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status));
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
new file mode 100644
index 000000000000..b58ac11c2747
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "hal_desc.h"
+
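+/* Per ring-type SRNG configuration. reg_start holds the base (R0) and
+ * head-pointer (R2) register addresses of the first ring instance and
+ * reg_size the stride between consecutive instances. LMAC rings are set
+ * up through HTT messages instead, so they carry no register addresses.
+ */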
+static const struct hal_srng_config hw_srng_config[] = {
+ /* TODO: max_rings can populated by querying HW capabilities */
+ { /* REO_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
+ },
+ .reg_size = {
+ HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
+ HAL_REO2_RING_HP - HAL_REO1_RING_HP,
+ },
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_EXCEPTION */
+ /* Designating REO2TCL ring as exception ring. This ring is
+ * similar to other REO2SW rings though it is named as REO2TCL.
+	 * Any of the REO2SW rings can be used as the exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
+ },
+ .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_REINJECT */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
+ },
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
+ },
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
+ },
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_DATA */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 3,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_data_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
+ },
+ .reg_size = {
+ HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
+ HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_gse_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
+ },
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_SRC */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ },
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_HP),
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM_IDLE_LINK */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
+ },
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* SW2WBM_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
+ },
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM2SW_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
+ },
+ .reg_size = {
+ (HAL_WBM1_RELEASE_RING_BASE_LSB -
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
+ },
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* RXDMA_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DESC */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA DIR BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 1,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+};
+
+static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+ val = ath11k_ahb_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+ srng->u.dst_ring.max_buffer_length);
+ ath11k_ahb_write32(ab, addr, val);
+}
+
+static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+ FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+ (srng->intr_timer_thres_us >> 3));
+
+ val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
+ val);
+
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in units of
+ * 8 usecs instead of 1 usec (as required by v1).
+ */
+ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+ srng->intr_timer_thres_us);
+
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+ srng->u.src_ring.low_threshold);
+ }
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath11k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath11k_warn(ab, "invalid ring number: %d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->lmac_ring)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
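+
+/* Worked example (illustrative): an LMAC ring type with start_ring_id
+ * HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF (128), ring_num 0 and mac_id 1
+ * yields ring_id = 128 + 1 * HAL_SRNG_RINGS_PER_LMAC (15) = 143, i.e. the
+ * corresponding ring in the next LMAC's ID range.
+ */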
+
+int ath11k_hal_srng_get_entrysize(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath11k_hal_srng_get_max_entries(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
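+
+/* In the two helpers above the rule for LMAC rings is: the pointer the
+ * host updates (src hp, dst tp) lives in the wrp block that FW reads,
+ * while the pointer HW/FW updates (src tp, dst hp) lives in the rdp block
+ * that the host reads.
+ */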
+
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
+{
+ struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+ byte_swap_data) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
+{
+ struct hal_ce_srng_dest_desc *desc =
+ (struct hal_ce_srng_dest_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
+}
+
+u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+{
+ struct hal_ce_srng_dst_status_desc *desc =
+ (struct hal_ce_srng_dst_status_desc *)buf;
+ u32 len;
+
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+ return len;
+}
+
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ (paddr & HAL_ADDR_LSB_REG_MASK));
+ desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
+
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
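+
+/* Worked example (illustrative): with entry_size 2 words, ring_size 16
+ * words and tp 12, a hardware hp of 4 has wrapped past the ring end, so
+ * (16 - 12 + 4) / 2 = 4 entries are pending for the consumer.
+ */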
+
+/* Returns number of available entries in src ring */
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
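+
+/* The "- 1" above keeps one entry permanently unused: if the producer
+ * could fill every slot, hp == tp would be ambiguous between a completely
+ * full and a completely empty ring.
+ */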
+
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: Using % is expensive, but we have to do this since the size
+ * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+ * Need to see if a separate function can be defined for rings with a
+ * power-of-2 ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so
+ * that the overhead of % can be avoided by masking with &.
+ */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+ /* TODO: Reap functionality is not used by all rings. If a particular
+ * ring does not use it, reap_hp need not be updated with next_hp.
+ * Make sure a separate function is used before optimizing away the
+ * reap_hp update below.
+ */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
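+
+/* Sketch of the optimization the TODO above refers to: for a ring whose
+ * size is a power of two the wrap could be computed as
+ *
+ *	next_hp = (srng->u.src_ring.hp + srng->entry_size) &
+ *		  (srng->ring_size - 1);
+ *
+ * avoiding the integer division hidden in the % operator.
+ */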
+
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
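+
+/* The two helpers above split production into phases:
+ * ath11k_hal_srng_src_reap_next() advances only reap_hp so entries can be
+ * claimed and filled without becoming visible to HW, and
+ * ath11k_hal_srng_src_get_next_reaped() later advances hp across the
+ * already reaped entries when they are ready to be published via
+ * ath11k_hal_srng_access_end().
+ */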
+
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ else
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+}
+
+/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
+ * should have been called before this.
+ */
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates go through FW, so the new
+ * value is written to a shared memory location that FW reads.
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ else
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+}
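+
+/* Typical caller pattern (illustrative; process() is a hypothetical
+ * consumer) for draining a destination ring:
+ *
+ *	u32 *desc;
+ *
+ *	spin_lock_bh(&srng->lock);
+ *	ath11k_hal_srng_access_begin(ab, srng);
+ *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
+ *		process(desc);
+ *	ath11k_hal_srng_access_end(ab, srng);
+ *	spin_unlock_bh(&srng->lock);
+ */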
+
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath11k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+ link_addr->info1 = FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+ FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+ reg_scatter_buf_sz * nsbufs));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL));
+
+ /* Setup head and tail pointers for the idle list */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[nsbufs - 1].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[nsbufs - 1].paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+ (end_offset >> 2)));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
+ 0));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+ 2 * tot_link_desc);
+
+ /* Enable the SRNG */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
+}
+
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 lmac_idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ spin_lock_init(&srng->lock);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ /* TODO: Add comments on these swap configurations */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+ HAL_SRNG_FLAGS_RING_PTR_SWAP;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->lmac_ring) {
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ }
+ } else {
+ /* During initialization loop count in all the descriptors
+ * will be set to zero, and HW will set it to 1 on completing
+ * descriptor update in first loop, and increments it by 1 on
+ * subsequent loops (loop count wraps around after reaching
+ * 0xffff). The 'loop_cnt' in SW ring state is the expected
+ * loop count in descriptors updated by HW (to be processed
+ * by SW).
+ */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->lmac_ring) {
+ /* For LMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ }
+ }
+
+ if (srng_config->lmac_ring)
+ return ring_id;
+
+ ath11k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath11k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
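+
+/* Minimal setup sketch (illustrative; vaddr/paddr are assumed to describe
+ * already-allocated DMA-coherent ring memory):
+ *
+ *	struct hal_srng_params params = { 0 };
+ *
+ *	params.ring_base_vaddr = vaddr;
+ *	params.ring_base_paddr = paddr;
+ *	params.num_entries = num_entries;
+ *	ring_id = ath11k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
+ */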
+
+int ath11k_hal_srng_init(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ hal->srng_config = hw_srng_config;
+
+ ret = ath11k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ return 0;
+
+err_free_cont_rdp:
+ ath11k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+
+void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+{
+ ath11k_hal_free_cont_rdp(ab);
+ ath11k_hal_free_cont_wrp(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
new file mode 100644
index 000000000000..5b13ccdf39e4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -0,0 +1,897 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_H
+#define ATH11K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath11k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
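+
+/* i.e. 32704 - 8 = 32696 usable bytes per scatter buffer; the trailing
+ * 8 bytes hold the link to the next scatter buffer (see
+ * ath11k_hal_setup_link_idle_list()).
+ */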
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the register address from bar0 of shadow register x */
+#define SHADOW_BASE_ADDRESS 0x00003024
+#define SHADOW_NUM_REGISTERS 36
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
+#define HAL_TCL1_RING_BASE_LSB 0x00000510
+#define HAL_TCL1_RING_BASE_MSB 0x00000514
+#define HAL_TCL1_RING_ID 0x00000518
+#define HAL_TCL1_RING_MISC 0x00000520
+#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
+#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
+#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
+#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
+#define HAL_TCL1_RING_MSI1_DATA 0x00000560
+#define HAL_TCL2_RING_BASE_LSB 0x00000568
+#define HAL_TCL_RING_BASE_LSB 0x00000618
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
+ (HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_ID_OFFSET \
+ (HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MISC_OFFSET \
+ (HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002018
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
+#define HAL_TCL_STATUS_RING_HP 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_RING_BASE_LSB 0x0000029c
+#define HAL_REO1_RING_BASE_MSB 0x000002a0
+#define HAL_REO1_RING_ID 0x000002a4
+#define HAL_REO1_RING_MISC 0x000002ac
+#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
+#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
+#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
+#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
+#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
+#define HAL_REO1_RING_MSI1_DATA 0x000002ec
+#define HAL_REO2_RING_BASE_LSB 0x000002f4
+#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
+#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
+#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
+#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_DATA_OFFSET \
+ (HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003038
+#define HAL_REO1_RING_TP 0x0000303c
+#define HAL_REO2_RING_HP 0x00003040
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP 0x00003058
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+
+/* CE ring R0 address */
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
+#define HAL_REO_STATUS_HP 0x00003070
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910
+#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+
+#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc))
+
+/* Add any other errors here and return them in
+ * ath11k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW1 = 0,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_SW2REO,
+
+ HAL_SRNG_RING_ID_REO_CMD = 8,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 16,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 32,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 56,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+ HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+ HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+ HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+#define HAL_SRNG_NUM_LMACS 3
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+ HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
+ HAL_SRNG_NUM_LMAC_RINGS)
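+
+/* With the IDs above: 143 - 128 = 15 rings per LMAC, 3 * 15 = 45 LMAC
+ * rings in total, and a ring-ID space of 127 + 45 = 172 entries.
+ */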
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was
+ * blocked earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_DRAIN:
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in micro seconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+ } src_ring;
+ } u;
+};
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ u8 lmac_ring;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
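+
+/* Note: entry_size and max_size are in units of 32-bit words, which is
+ * why the hw_srng_config table in hal.c initializes entry_size as
+ * sizeof(<descriptor struct>) >> 2.
+ */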
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ */
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
+
+struct ath11k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/**
+ * HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath11k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ const struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
+ int num_shadow_reg_configured;
+};
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seqtype);
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
+u32 ath11k_hal_ce_dst_status_get_length(void *buf);
+int ath11k_hal_srng_get_entrysize(u32 ring_type);
+int ath11k_hal_srng_get_max_entries(u32 ring_type);
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
new file mode 100644
index 000000000000..5e200380cca4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -0,0 +1,2468 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_HAL_DESC_H
+#define ATH11K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 11)
+
+struct ath11k_buffer_addr {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+/* ath11k_buffer_addr
+ *
+ * info0
+ * Address (lower 32 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * addr
+ * Address (upper 8 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * SW can use this field, for example, to associate a buffer's
+ * physical address with its virtual address.
+ */
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_MACRX_CBF_READ_REQUEST = 8 /* 0x8 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 9 /* 0x9 */,
+ HAL_MACRX_EXPECT_NDP_RECEPTION = 10 /* 0xa */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 11 /* 0xb */,
+ HAL_MACRX_NDP_TIMEOUT = 12 /* 0xc */,
+ HAL_MACRX_ABORT_ACK = 13 /* 0xd */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 14 /* 0xe */,
+ HAL_MACRX_CHAIN_MASK = 15 /* 0xf */,
+ HAL_MACRX_NAP_USER = 16 /* 0x10 */,
+ HAL_MACRX_ABORT_REQUEST = 17 /* 0x11 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 18 /* 0x12 */,
+ HAL_PHYTX_ABORT_ACK = 19 /* 0x13 */,
+ HAL_PHYTX_ABORT_REQUEST = 20 /* 0x14 */,
+ HAL_PHYTX_PKT_END = 21 /* 0x15 */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 22 /* 0x16 */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 23 /* 0x17 */,
+ HAL_PHYTX_DATA_REQUEST = 24 /* 0x18 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 25 /* 0x19 */,
+ HAL_PHYTX_NAP_ACK = 26 /* 0x1a */,
+ HAL_PHYTX_NAP_DONE = 27 /* 0x1b */,
+ HAL_PHYTX_OFF_ACK = 28 /* 0x1c */,
+ HAL_PHYTX_ON_ACK = 29 /* 0x1d */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 30 /* 0x1e */,
+ HAL_PHYTX_DEBUG16 = 31 /* 0x1f */,
+ HAL_MACTX_ABORT_REQUEST = 32 /* 0x20 */,
+ HAL_MACTX_ABORT_ACK = 33 /* 0x21 */,
+ HAL_MACTX_PKT_END = 34 /* 0x22 */,
+ HAL_MACTX_PRE_PHY_DESC = 35 /* 0x23 */,
+ HAL_MACTX_BF_PARAMS_COMMON = 36 /* 0x24 */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 37 /* 0x25 */,
+ HAL_MACTX_PREFETCH_CV = 38 /* 0x26 */,
+ HAL_MACTX_USER_DESC_COMMON = 39 /* 0x27 */,
+ HAL_MACTX_USER_DESC_PER_USER = 40 /* 0x28 */,
+ HAL_EXAMPLE_USER_TLV_16 = 41 /* 0x29 */,
+ HAL_EXAMPLE_TLV_16 = 42 /* 0x2a */,
+ HAL_MACTX_PHY_OFF = 43 /* 0x2b */,
+ HAL_MACTX_PHY_ON = 44 /* 0x2c */,
+ HAL_MACTX_SYNTH_OFF = 45 /* 0x2d */,
+ HAL_MACTX_EXPECT_CBF_COMMON = 46 /* 0x2e */,
+ HAL_MACTX_EXPECT_CBF_PER_USER = 47 /* 0x2f */,
+ HAL_MACTX_PHY_DESC = 48 /* 0x30 */,
+ HAL_MACTX_L_SIG_A = 49 /* 0x31 */,
+ HAL_MACTX_L_SIG_B = 50 /* 0x32 */,
+ HAL_MACTX_HT_SIG = 51 /* 0x33 */,
+ HAL_MACTX_VHT_SIG_A = 52 /* 0x34 */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 53 /* 0x35 */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 54 /* 0x36 */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 55 /* 0x37 */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 56 /* 0x38 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 57 /* 0x39 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 58 /* 0x3a */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 59 /* 0x3b */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 60 /* 0x3c */,
+ HAL_MACTX_SERVICE = 61 /* 0x3d */,
+ HAL_MACTX_HE_SIG_A_SU = 62 /* 0x3e */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 63 /* 0x3f */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 64 /* 0x40 */,
+ HAL_MACTX_HE_SIG_B1_MU = 65 /* 0x41 */,
+ HAL_MACTX_HE_SIG_B2_MU = 66 /* 0x42 */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 67 /* 0x43 */,
+ HAL_MACTX_DELETE_CV = 68 /* 0x44 */,
+ HAL_MACTX_MU_UPLINK_COMMON = 69 /* 0x45 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 70 /* 0x46 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 71 /* 0x47 */,
+ HAL_MACTX_PHY_NAP = 72 /* 0x48 */,
+ HAL_MACTX_DEBUG = 73 /* 0x49 */,
+ HAL_PHYRX_ABORT_ACK = 74 /* 0x4a */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 75 /* 0x4b */,
+ HAL_PHYRX_RSSI_LEGACY = 76 /* 0x4c */,
+ HAL_PHYRX_RSSI_HT = 77 /* 0x4d */,
+ HAL_PHYRX_USER_INFO = 78 /* 0x4e */,
+ HAL_PHYRX_PKT_END = 79 /* 0x4f */,
+ HAL_PHYRX_DEBUG = 80 /* 0x50 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 81 /* 0x51 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 82 /* 0x52 */,
+ HAL_PHYRX_L_SIG_A = 83 /* 0x53 */,
+ HAL_PHYRX_L_SIG_B = 84 /* 0x54 */,
+ HAL_PHYRX_HT_SIG = 85 /* 0x55 */,
+ HAL_PHYRX_VHT_SIG_A = 86 /* 0x56 */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 87 /* 0x57 */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 88 /* 0x58 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 89 /* 0x59 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 90 /* 0x5a */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 91 /* 0x5b */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 92 /* 0x5c */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 93 /* 0x5d */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 94 /* 0x5e */,
+ HAL_PHYRX_HE_SIG_A_SU = 95 /* 0x5f */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 96 /* 0x60 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 97 /* 0x61 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 98 /* 0x62 */,
+ HAL_PHYRX_HE_SIG_B2_MU = 99 /* 0x63 */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 100 /* 0x64 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 101 /* 0x65 */,
+ HAL_PHYRX_COMMON_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_DATA_DONE = 103 /* 0x67 */,
+ HAL_RECEIVE_RSSI_INFO = 104 /* 0x68 */,
+ HAL_RECEIVE_USER_INFO = 105 /* 0x69 */,
+ HAL_MIMO_CONTROL_INFO = 106 /* 0x6a */,
+ HAL_RX_LOCATION_INFO = 107 /* 0x6b */,
+ HAL_COEX_TX_REQ = 108 /* 0x6c */,
+ HAL_DUMMY = 109 /* 0x6d */,
+ HAL_RX_TIMING_OFFSET_INFO = 110 /* 0x6e */,
+ HAL_EXAMPLE_TLV_32_NAME = 111 /* 0x6f */,
+ HAL_MPDU_LIMIT = 112 /* 0x70 */,
+ HAL_NA_LENGTH_END = 113 /* 0x71 */,
+ HAL_OLE_BUF_STATUS = 114 /* 0x72 */,
+ HAL_PCU_PPDU_SETUP_DONE = 115 /* 0x73 */,
+ HAL_PCU_PPDU_SETUP_END = 116 /* 0x74 */,
+ HAL_PCU_PPDU_SETUP_INIT = 117 /* 0x75 */,
+ HAL_PCU_PPDU_SETUP_START = 118 /* 0x76 */,
+ HAL_PDG_FES_SETUP = 119 /* 0x77 */,
+ HAL_PDG_RESPONSE = 120 /* 0x78 */,
+ HAL_PDG_TX_REQ = 121 /* 0x79 */,
+ HAL_SCH_WAIT_INSTR = 122 /* 0x7a */,
+ HAL_SCHEDULER_TLV = 123 /* 0x7b */,
+ HAL_TQM_FLOW_EMPTY_STATUS = 124 /* 0x7c */,
+ HAL_TQM_FLOW_NOT_EMPTY_STATUS = 125 /* 0x7d */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 126 /* 0x7e */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 127 /* 0x7f */,
+ HAL_TQM_GEN_MPDUS = 128 /* 0x80 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 129 /* 0x81 */,
+ HAL_TQM_REMOVE_MPDU = 130 /* 0x82 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 131 /* 0x83 */,
+ HAL_TQM_REMOVE_MSDU = 132 /* 0x84 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 133 /* 0x85 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 134 /* 0x86 */,
+ HAL_TQM_WRITE_CMD = 135 /* 0x87 */,
+ HAL_OFDMA_TRIGGER_DETAILS = 136 /* 0x88 */,
+ HAL_TX_DATA = 137 /* 0x89 */,
+ HAL_TX_FES_SETUP = 138 /* 0x8a */,
+ HAL_RX_PACKET = 139 /* 0x8b */,
+ HAL_EXPECTED_RESPONSE = 140 /* 0x8c */,
+ HAL_TX_MPDU_END = 141 /* 0x8d */,
+ HAL_TX_MPDU_START = 142 /* 0x8e */,
+ HAL_TX_MSDU_END = 143 /* 0x8f */,
+ HAL_TX_MSDU_START = 144 /* 0x90 */,
+ HAL_TX_SW_MODE_SETUP = 145 /* 0x91 */,
+ HAL_TXPCU_BUFFER_STATUS = 146 /* 0x92 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 147 /* 0x93 */,
+ HAL_DATA_TO_TIME_CONFIG = 148 /* 0x94 */,
+ HAL_EXAMPLE_USER_TLV_32 = 149 /* 0x95 */,
+ HAL_MPDU_INFO = 150 /* 0x96 */,
+ HAL_PDG_USER_SETUP = 151 /* 0x97 */,
+ HAL_TX_11AH_SETUP = 152 /* 0x98 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 153 /* 0x99 */,
+ HAL_TX_PEER_ENTRY = 154 /* 0x9a */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 155 /* 0x9b */,
+ HAL_EXAMPLE_STRUCT_NAME = 156 /* 0x9c */,
+ HAL_PCU_PPDU_SETUP_END_INFO = 157 /* 0x9d */,
+ HAL_PPDU_RATE_SETTING = 158 /* 0x9e */,
+ HAL_PROT_RATE_SETTING = 159 /* 0x9f */,
+ HAL_RX_MPDU_DETAILS = 160 /* 0xa0 */,
+ HAL_EXAMPLE_USER_TLV_42 = 161 /* 0xa1 */,
+ HAL_RX_MSDU_LINK = 162 /* 0xa2 */,
+ HAL_RX_REO_QUEUE = 163 /* 0xa3 */,
+ HAL_ADDR_SEARCH_ENTRY = 164 /* 0xa4 */,
+ HAL_SCHEDULER_CMD = 165 /* 0xa5 */,
+ HAL_TX_FLUSH = 166 /* 0xa6 */,
+ HAL_TQM_ENTRANCE_RING = 167 /* 0xa7 */,
+ HAL_TX_DATA_WORD = 168 /* 0xa8 */,
+ HAL_TX_MPDU_DETAILS = 169 /* 0xa9 */,
+ HAL_TX_MPDU_LINK = 170 /* 0xaa */,
+ HAL_TX_MPDU_LINK_PTR = 171 /* 0xab */,
+ HAL_TX_MPDU_QUEUE_HEAD = 172 /* 0xac */,
+ HAL_TX_MPDU_QUEUE_EXT = 173 /* 0xad */,
+ HAL_TX_MPDU_QUEUE_EXT_PTR = 174 /* 0xae */,
+ HAL_TX_MSDU_DETAILS = 175 /* 0xaf */,
+ HAL_TX_MSDU_EXTENSION = 176 /* 0xb0 */,
+ HAL_TX_MSDU_FLOW = 177 /* 0xb1 */,
+ HAL_TX_MSDU_LINK = 178 /* 0xb2 */,
+ HAL_TX_MSDU_LINK_ENTRY_PTR = 179 /* 0xb3 */,
+ HAL_RESPONSE_RATE_SETTING = 180 /* 0xb4 */,
+ HAL_TXPCU_BUFFER_BASICS = 181 /* 0xb5 */,
+ HAL_UNIFORM_DESCRIPTOR_HEADER = 182 /* 0xb6 */,
+ HAL_UNIFORM_TQM_CMD_HEADER = 183 /* 0xb7 */,
+ HAL_UNIFORM_TQM_STATUS_HEADER = 184 /* 0xb8 */,
+ HAL_USER_RATE_SETTING = 185 /* 0xb9 */,
+ HAL_WBM_BUFFER_RING = 186 /* 0xba */,
+ HAL_WBM_LINK_DESCRIPTOR_RING = 187 /* 0xbb */,
+ HAL_WBM_RELEASE_RING = 188 /* 0xbc */,
+ HAL_TX_FLUSH_REQ = 189 /* 0xbd */,
+ HAL_RX_MSDU_DETAILS = 190 /* 0xbe */,
+ HAL_TQM_WRITE_CMD_STATUS = 191 /* 0xbf */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 192 /* 0xc0 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 193 /* 0xc1 */,
+ HAL_EXAMPLE_USER_CTLV_32 = 194 /* 0xc2 */,
+ HAL_TX_FES_STATUS_START = 195 /* 0xc3 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 196 /* 0xc4 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 197 /* 0xc5 */,
+ HAL_TX_FES_STATUS_END = 198 /* 0xc6 */,
+ HAL_RX_TRIG_INFO = 199 /* 0xc7 */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 200 /* 0xc8 */,
+ HAL_RX_FRAME_BITMAP_REQ = 201 /* 0xc9 */,
+ HAL_RX_FRAME_BITMAP_ACK = 202 /* 0xca */,
+ HAL_COEX_RX_STATUS = 203 /* 0xcb */,
+ HAL_RX_START_PARAM = 204 /* 0xcc */,
+ HAL_RX_PPDU_START = 205 /* 0xcd */,
+ HAL_RX_PPDU_END = 206 /* 0xce */,
+ HAL_RX_MPDU_START = 207 /* 0xcf */,
+ HAL_RX_MPDU_END = 208 /* 0xd0 */,
+ HAL_RX_MSDU_START = 209 /* 0xd1 */,
+ HAL_RX_MSDU_END = 210 /* 0xd2 */,
+ HAL_RX_ATTENTION = 211 /* 0xd3 */,
+ HAL_RECEIVED_RESPONSE_INFO = 212 /* 0xd4 */,
+ HAL_RX_PHY_SLEEP = 213 /* 0xd5 */,
+ HAL_RX_HEADER = 214 /* 0xd6 */,
+ HAL_RX_PEER_ENTRY = 215 /* 0xd7 */,
+ HAL_RX_FLUSH = 216 /* 0xd8 */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 217 /* 0xd9 */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 218 /* 0xda */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 219 /* 0xdb */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 220 /* 0xdc */,
+ HAL_TX_CBF_INFO = 221 /* 0xdd */,
+ HAL_PCU_PPDU_SETUP_USER = 222 /* 0xde */,
+ HAL_RX_MPDU_PCU_START = 223 /* 0xdf */,
+ HAL_RX_PM_INFO = 224 /* 0xe0 */,
+ HAL_RX_USER_PPDU_END = 225 /* 0xe1 */,
+ HAL_RX_PRE_PPDU_START = 226 /* 0xe2 */,
+ HAL_RX_PREAMBLE = 227 /* 0xe3 */,
+ HAL_TX_FES_SETUP_COMPLETE = 228 /* 0xe4 */,
+ HAL_TX_LAST_MPDU_FETCHED = 229 /* 0xe5 */,
+ HAL_TXDMA_STOP_REQUEST = 230 /* 0xe6 */,
+ HAL_RXPCU_SETUP = 231 /* 0xe7 */,
+ HAL_RXPCU_USER_SETUP = 232 /* 0xe8 */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 233 /* 0xe9 */,
+ HAL_TQM_ACKED_MPDU = 234 /* 0xea */,
+ HAL_COEX_TX_RESP = 235 /* 0xeb */,
+ HAL_COEX_TX_STATUS = 236 /* 0xec */,
+ HAL_MACTX_COEX_PHY_CTRL = 237 /* 0xed */,
+ HAL_COEX_STATUS_BROADCAST = 238 /* 0xee */,
+ HAL_RESPONSE_START_STATUS = 239 /* 0xef */,
+ HAL_RESPONSE_END_STATUS = 240 /* 0xf0 */,
+ HAL_CRYPTO_STATUS = 241 /* 0xf1 */,
+ HAL_RECEIVED_TRIGGER_INFO = 242 /* 0xf2 */,
+ HAL_REO_ENTRANCE_RING = 243 /* 0xf3 */,
+ HAL_RX_MPDU_LINK = 244 /* 0xf4 */,
+ HAL_COEX_TX_STOP_CTRL = 245 /* 0xf5 */,
+ HAL_RX_PPDU_ACK_REPORT = 246 /* 0xf6 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 247 /* 0xf7 */,
+ HAL_SCH_COEX_STATUS = 248 /* 0xf8 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 249 /* 0xf9 */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+ HAL_TX_FES_STATUS_PROT = 251 /* 0xfb */,
+ HAL_TX_FES_STATUS_START_PPDU = 252 /* 0xfc */,
+ HAL_TX_FES_STATUS_START_PROT = 253 /* 0xfd */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 254 /* 0xfe */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 255 /* 0xff */,
+ HAL_TX_MPDU_COUNT_TRANSFER_END = 256 /* 0x100 */,
+ HAL_WHO_ANCHOR_OFFSET = 257 /* 0x101 */,
+ HAL_WHO_ANCHOR_VALUE = 258 /* 0x102 */,
+ HAL_WHO_CCE_INFO = 259 /* 0x103 */,
+ HAL_WHO_COMMIT = 260 /* 0x104 */,
+ HAL_WHO_COMMIT_DONE = 261 /* 0x105 */,
+ HAL_WHO_FLUSH = 262 /* 0x106 */,
+ HAL_WHO_L2_LLC = 263 /* 0x107 */,
+ HAL_WHO_L2_PAYLOAD = 264 /* 0x108 */,
+ HAL_WHO_L3_CHECKSUM = 265 /* 0x109 */,
+ HAL_WHO_L3_INFO = 266 /* 0x10a */,
+ HAL_WHO_L4_CHECKSUM = 267 /* 0x10b */,
+ HAL_WHO_L4_INFO = 268 /* 0x10c */,
+ HAL_WHO_MSDU = 269 /* 0x10d */,
+ HAL_WHO_MSDU_MISC = 270 /* 0x10e */,
+ HAL_WHO_PACKET_DATA = 271 /* 0x10f */,
+ HAL_WHO_PACKET_HDR = 272 /* 0x110 */,
+ HAL_WHO_PPDU_END = 273 /* 0x111 */,
+ HAL_WHO_PPDU_START = 274 /* 0x112 */,
+ HAL_WHO_TSO = 275 /* 0x113 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 276 /* 0x114 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 277 /* 0x115 */,
+ HAL_WHO_WMAC_IV = 278 /* 0x116 */,
+ HAL_MPDU_INFO_END = 279 /* 0x117 */,
+ HAL_MPDU_INFO_BITMAP = 280 /* 0x118 */,
+ HAL_TX_QUEUE_EXTENSION = 281 /* 0x119 */,
+ HAL_RX_PEER_ENTRY_DETAILS = 282 /* 0x11a */,
+ HAL_RX_REO_QUEUE_REFERENCE = 283 /* 0x11b */,
+ HAL_RX_REO_QUEUE_EXT = 284 /* 0x11c */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 285 /* 0x11d */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 286 /* 0x11e */,
+ HAL_TQM_ACKED_MPDU_STATUS = 287 /* 0x11f */,
+ HAL_TQM_ADD_MSDU_STATUS = 288 /* 0x120 */,
+ HAL_RX_MPDU_LINK_PTR = 289 /* 0x121 */,
+ HAL_REO_DESTINATION_RING = 290 /* 0x122 */,
+ HAL_TQM_LIST_GEN_DONE = 291 /* 0x123 */,
+ HAL_WHO_TERMINATE = 292 /* 0x124 */,
+ HAL_TX_LAST_MPDU_END = 293 /* 0x125 */,
+ HAL_TX_CV_DATA = 294 /* 0x126 */,
+ HAL_TCL_ENTRANCE_FROM_PPE_RING = 295 /* 0x127 */,
+ HAL_PPDU_TX_END = 296 /* 0x128 */,
+ HAL_PROT_TX_END = 297 /* 0x129 */,
+ HAL_PDG_RESPONSE_RATE_SETTING = 298 /* 0x12a */,
+ HAL_MPDU_INFO_GLOBAL_END = 299 /* 0x12b */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 300 /* 0x12c */,
+ HAL_RX_PPDU_END_USER_STATS = 301 /* 0x12d */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 302 /* 0x12e */,
+ HAL_NO_ACK_REPORT = 303 /* 0x12f */,
+ HAL_ACK_REPORT = 304 /* 0x130 */,
+ HAL_UNIFORM_REO_CMD_HEADER = 305 /* 0x131 */,
+ HAL_REO_GET_QUEUE_STATS = 306 /* 0x132 */,
+ HAL_REO_FLUSH_QUEUE = 307 /* 0x133 */,
+ HAL_REO_FLUSH_CACHE = 308 /* 0x134 */,
+ HAL_REO_UNBLOCK_CACHE = 309 /* 0x135 */,
+ HAL_UNIFORM_REO_STATUS_HEADER = 310 /* 0x136 */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 311 /* 0x137 */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 312 /* 0x138 */,
+ HAL_REO_FLUSH_CACHE_STATUS = 313 /* 0x139 */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 314 /* 0x13a */,
+ HAL_TQM_FLUSH_CACHE = 315 /* 0x13b */,
+ HAL_TQM_UNBLOCK_CACHE = 316 /* 0x13c */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 317 /* 0x13d */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 318 /* 0x13e */,
+ HAL_RX_PPDU_END_STATUS_DONE = 319 /* 0x13f */,
+ HAL_RX_STATUS_BUFFER_DONE = 320 /* 0x140 */,
+ HAL_BUFFER_ADDR_INFO = 321 /* 0x141 */,
+ HAL_RX_MSDU_DESC_INFO = 322 /* 0x142 */,
+ HAL_RX_MPDU_DESC_INFO = 323 /* 0x143 */,
+ HAL_TCL_DATA_CMD = 324 /* 0x144 */,
+ HAL_TCL_GSE_CMD = 325 /* 0x145 */,
+ HAL_TCL_EXIT_BASE = 326 /* 0x146 */,
+ HAL_TCL_COMPACT_EXIT_RING = 327 /* 0x147 */,
+ HAL_TCL_REGULAR_EXIT_RING = 328 /* 0x148 */,
+ HAL_TCL_EXTENDED_EXIT_RING = 329 /* 0x149 */,
+ HAL_UPLINK_COMMON_INFO = 330 /* 0x14a */,
+ HAL_UPLINK_USER_SETUP_INFO = 331 /* 0x14b */,
+ HAL_TX_DATA_SYNC = 332 /* 0x14c */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 333 /* 0x14d */,
+ HAL_TCL_STATUS_RING = 334 /* 0x14e */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 335 /* 0x14f */,
+ HAL_TQM_SYNC_CMD = 336 /* 0x150 */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 337 /* 0x151 */,
+ HAL_TQM_SYNC_CMD_STATUS = 338 /* 0x152 */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 341 /* 0x155 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 342 /* 0x156 */,
+ HAL_REO_TO_PPE_RING = 343 /* 0x157 */,
+ HAL_RX_MPDU_INFO = 344 /* 0x158 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+ HAL_EXAMPLE_USER_TLV_32_NAME = 347 /* 0x15b */,
+ HAL_RX_PPDU_START_USER_INFO = 348 /* 0x15c */,
+ HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW = 349 /* 0x15d */,
+ HAL_RX_RING_MASK = 350 /* 0x15e */,
+ HAL_WHO_CLASSIFY_INFO = 351 /* 0x15f */,
+ HAL_TXPT_CLASSIFY_INFO = 352 /* 0x160 */,
+ HAL_RXPT_CLASSIFY_INFO = 353 /* 0x161 */,
+ HAL_TX_FLOW_SEARCH_ENTRY = 354 /* 0x162 */,
+ HAL_RX_FLOW_SEARCH_ENTRY = 355 /* 0x163 */,
+ HAL_RECEIVED_TRIGGER_INFO_DETAILS = 356 /* 0x164 */,
+ HAL_COEX_MAC_NAP = 357 /* 0x165 */,
+ HAL_MACRX_ABORT_REQUEST_INFO = 358 /* 0x166 */,
+ HAL_MACTX_ABORT_REQUEST_INFO = 359 /* 0x167 */,
+ HAL_PHYRX_ABORT_REQUEST_INFO = 360 /* 0x168 */,
+ HAL_PHYTX_ABORT_REQUEST_INFO = 361 /* 0x169 */,
+ HAL_RXPCU_PPDU_END_INFO = 362 /* 0x16a */,
+ HAL_WHO_MESH_CONTROL = 363 /* 0x16b */,
+ HAL_L_SIG_A_INFO = 364 /* 0x16c */,
+ HAL_L_SIG_B_INFO = 365 /* 0x16d */,
+ HAL_HT_SIG_INFO = 366 /* 0x16e */,
+ HAL_VHT_SIG_A_INFO = 367 /* 0x16f */,
+ HAL_VHT_SIG_B_SU20_INFO = 368 /* 0x170 */,
+ HAL_VHT_SIG_B_SU40_INFO = 369 /* 0x171 */,
+ HAL_VHT_SIG_B_SU80_INFO = 370 /* 0x172 */,
+ HAL_VHT_SIG_B_SU160_INFO = 371 /* 0x173 */,
+ HAL_VHT_SIG_B_MU20_INFO = 372 /* 0x174 */,
+ HAL_VHT_SIG_B_MU40_INFO = 373 /* 0x175 */,
+ HAL_VHT_SIG_B_MU80_INFO = 374 /* 0x176 */,
+ HAL_VHT_SIG_B_MU160_INFO = 375 /* 0x177 */,
+ HAL_SERVICE_INFO = 376 /* 0x178 */,
+ HAL_HE_SIG_A_SU_INFO = 377 /* 0x179 */,
+ HAL_HE_SIG_A_MU_DL_INFO = 378 /* 0x17a */,
+ HAL_HE_SIG_A_MU_UL_INFO = 379 /* 0x17b */,
+ HAL_HE_SIG_B1_MU_INFO = 380 /* 0x17c */,
+ HAL_HE_SIG_B2_MU_INFO = 381 /* 0x17d */,
+ HAL_HE_SIG_B2_OFDMA_INFO = 382 /* 0x17e */,
+ HAL_PDG_SW_MODE_BW_START = 383 /* 0x17f */,
+ HAL_PDG_SW_MODE_BW_END = 384 /* 0x180 */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 385 /* 0x181 */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 386 /* 0x182 */,
+ HAL_SCHEDULER_END = 387 /* 0x183 */,
+ HAL_PEER_TABLE_ENTRY = 388 /* 0x184 */,
+ HAL_SW_PEER_INFO = 389 /* 0x185 */,
+ HAL_RXOLE_CCE_CLASSIFY_INFO = 390 /* 0x186 */,
+ HAL_TCL_CCE_CLASSIFY_INFO = 391 /* 0x187 */,
+ HAL_RXOLE_CCE_INFO = 392 /* 0x188 */,
+ HAL_TCL_CCE_INFO = 393 /* 0x189 */,
+ HAL_TCL_CCE_SUPERRULE = 394 /* 0x18a */,
+ HAL_CCE_RULE = 395 /* 0x18b */,
+ HAL_RX_PPDU_START_DROPPED = 396 /* 0x18c */,
+ HAL_RX_PPDU_END_DROPPED = 397 /* 0x18d */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 398 /* 0x18e */,
+ HAL_RX_MPDU_START_DROPPED = 399 /* 0x18f */,
+ HAL_RX_MSDU_START_DROPPED = 400 /* 0x190 */,
+ HAL_RX_MSDU_END_DROPPED = 401 /* 0x191 */,
+ HAL_RX_MPDU_END_DROPPED = 402 /* 0x192 */,
+ HAL_RX_ATTENTION_DROPPED = 403 /* 0x193 */,
+ HAL_TXPCU_USER_SETUP = 404 /* 0x194 */,
+ HAL_RXPCU_USER_SETUP_EXT = 405 /* 0x195 */,
+ HAL_CE_SRC_DESC = 406 /* 0x196 */,
+ HAL_CE_STAT_DESC = 407 /* 0x197 */,
+ HAL_RXOLE_CCE_SUPERRULE = 408 /* 0x198 */,
+ HAL_TX_RATE_STATS_INFO = 409 /* 0x199 */,
+ HAL_CMD_PART_0_END = 410 /* 0x19a */,
+ HAL_MACTX_SYNTH_ON = 411 /* 0x19b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 412 /* 0x19c */,
+ HAL_TQM_MPDU_GLOBAL_START = 413 /* 0x19d */,
+ HAL_EXAMPLE_TLV_32 = 414 /* 0x19e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 415 /* 0x19f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 416 /* 0x1a0 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 417 /* 0x1a1 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 419 /* 0x1a3 */,
+ HAL_CE_DST_DESC = 420 /* 0x1a4 */,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ u32 tl;
+ u8 value[0];
+} __packed;
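+
+/* Illustrative sketch (hypothetical helper, not from the driver): one way
+ * a consumer could step to the next TLV using the tag/len masks above.
+ * The helper name and the back-to-back, HAL_TLV_ALIGN-aligned layout are
+ * assumptions; FIELD_GET() and PTR_ALIGN() come from <linux/bitfield.h>
+ * and <linux/kernel.h>.
+ */
+static inline const struct hal_tlv_hdr *
+ath11k_dbg_tlv_next(const struct hal_tlv_hdr *tlv)
+{
+ u16 len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ const u8 *ptr = (const u8 *)tlv + sizeof(*tlv) + len;
+
+ /* TLVs are assumed to sit on HAL_TLV_ALIGN boundaries. */
+ return (const void *)PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+}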
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_SEQ_NUM GENMASK(19, 8)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(23)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(24)
+#define RX_MPDU_DESC_INFO0_VALID_SA BIT(25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(26)
+#define RX_MPDU_DESC_INFO0_VALID_DA BIT(27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC BIT(28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+
+struct rx_mpdu_desc {
+ u32 info0; /* %RX_MPDU_DESC_INFO0_ */
+ u32 meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ * The field can have two different meanings based on the setting
+ * of field 'bar_frame'. If 'bar_frame' is set, it means the MPDU
+ * start sequence number from the BAR frame; otherwise it means
+ * the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC source address search due to the expiration
+ * of the search timer.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates that at least one
+ * of the DA addresses is a multicast or broadcast address.
+ *
+ * da_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC destination address search due to the
+ * expiration of the search timer.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * that the MSDU buffer contains a 'RAW' MPDU.
+ */
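+
+/* Illustrative sketch (hypothetical helpers): decoding rx_mpdu_desc fields
+ * with the RX_MPDU_DESC_INFO0_ masks above.
+ */
+static inline u8 ath11k_dbg_mpdu_msdu_count(const struct rx_mpdu_desc *mpdu)
+{
+ return FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT, mpdu->info0);
+}
+
+static inline bool ath11k_dbg_mpdu_is_frag(const struct rx_mpdu_desc *mpdu)
+{
+ return !!(mpdu->info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+}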
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(22)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(24)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(27)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+struct rx_msdu_desc {
+ u32 info0;
+ u32 rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates the first MSDU in the MPDU.
+ *
+ * last_msdu_in_mpdu
+ * Indicates the last MSDU in the MPDU. This flag can be true only
+ * when 'msdu_continuation' is set to 0. This implies that when an
+ * MSDU is spread out over multiple buffers and thus
+ * msdu_continuation is set, 'last_msdu_in_mpdu' can be set only
+ * for the very last buffer of the MSDU.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU; it still
+ * represents the MSDU length after decapsulation. In case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without the
+ * FCS field).
+ *
+ * reo_destination_indication
+ * The id of the REO exit ring to which the MSDU frame shall be
+ * pushed after (MPDU level) reordering has finished. Values are
+ * defined in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to
+ * the expiration of the search timer for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates that the DA
+ * address is a multicast or broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due
+ * to the expiration of the search timer for this MSDU.
+ */
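+
+/* Usage sketch (hypothetical): HAL_RX_MSDU_PKT_LENGTH_GET() extracts the
+ * decapsulated MSDU length from info0, e.g.
+ *
+ * u32 msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu->info0);
+ */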
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID BIT(0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_DEST_RING_INFO1_ */
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' indicates an error. All error codes are
+ * defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ * Indicates the REO MPDU reorder queue id from which this frame
+ * originated.
+ *
+ * reorder_info_valid
+ * When set, REO has been instructed to not perform the actual
+ * re-ordering of frames for this queue, but just to insert
+ * the reorder opcodes.
+ *
+ * reorder_opcode
+ * Field is valid when 'reorder_info_valid' is set; for debug
+ * purposes it is always valid as well.
+ *
+ * reorder_slot_idx
+ * Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
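+
+/* Illustrative sketch (hypothetical helper): classifying a REO destination
+ * ring entry with the INFO0 masks above; assumes the descriptor has been
+ * read out of the ring in CPU byte order.
+ */
+static inline bool
+ath11k_dbg_reo_dest_err(const struct hal_reo_dest_ring *desc,
+   enum hal_reo_dest_ring_error_code *code)
+{
+ u32 reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, desc->info0);
+
+ if (reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED)
+  return false;
+
+ *code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE, desc->info0);
+ return true;
+}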
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+
+struct hal_reo_entrance_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the REO exit ring to which the MSDU frame shall be
+ * pushed after (MPDU level) reordering has finished. Values are
+ * defined in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'push_reason' indicates an error. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
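+
+/* Usage sketch (hypothetical): the RXDMA error code is only meaningful when
+ * the push reason signals a detected error, e.g.
+ *
+ * if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, info1) ==
+ *     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED)
+ *  ecode = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, info1);
+ */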
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ u32 info0;
+} __packed;
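+
+/* Illustrative sketch (hypothetical helper): composing a REO command header
+ * with FIELD_PREP() from <linux/bitfield.h>; 'cmd_num' would come from a
+ * driver-side sequence counter.
+ */
+static inline void ath11k_dbg_reo_cmd_hdr_fill(struct hal_reo_cmd_hdr *hdr,
+        u16 cmd_num, bool want_status)
+{
+ hdr->info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num);
+ if (want_status)
+  hdr->info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+}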
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, clear the stats after
+ * generating the status.
+ *
+ * The following stats will be cleared:
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
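+
+/* Usage sketch (hypothetical): the 40-bit REO queue descriptor address is
+ * split across queue_addr_lo and the ADDR_HI field of info0, e.g.
+ *
+ * cmd->queue_addr_lo = lower_32_bits(paddr);
+ * cmd->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ *    upper_32_bits(paddr));
+ */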
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 desc_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 cache_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * epd
+ * When this bit is set, the input packet is an EPD type.
+ *
+ * encap_type
+ * Indicates the encapsulation that HW will perform. Values are
+ * defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ * Field only valid for encap_type: RAW
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ * Treats source memory (packet buffer) organization as big-endian.
+ * 1'b0: Source memory is little endian
+ * 1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ * Treats link descriptor and Metadata as big-endian.
+ * 1'b0: memory is little endian
+ * 1'b1: memory is big endian
+ *
+ * search_type
+ * Search type select
+ * 0 - Normal search, 1 - Index based address search,
+ * 2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ * Address X/Y search enable in ASE correspondingly.
+ * 1'b0: Search disable
+ * 1'b1: Search Enable
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * mesh_enable
+ * For raw WiFi frames, this indicates transmission to a mesh STA,
+ * enabling the interpretation of the 'Mesh Control Present' bit
+ * (bit 8) of QoS Control.
+ * For native WiFi frames, this indicates that a 'Mesh Control'
+ * field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ * TCL uses this LMAC_ID in address search, i.e., while
+ * finding a matching entry for the packet in the AST
+ * corresponding to the given LMAC_ID.
+ *
+ * If LMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any MAC
+ *
+ * dscp_tid_table_num
+ * DSCP to TID mapping table number that needs to be used
+ * for the MSDU.
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to the LSB four bits of the hash value
+ * of the match data when the entry pointed to by the search
+ * index may also be used in content based search. The value can
+ * be anything when the entry pointed to by the search index
+ * will not be used for content based search.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
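+
+/* Illustrative sketch (hypothetical helper): encoding a minimal TCL data
+ * command; the buffer/native-wifi choices are for illustration only.
+ */
+static inline void ath11k_dbg_tcl_cmd_fill(struct hal_tcl_data_cmd *cmd,
+        u16 data_len)
+{
+ cmd->info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE,
+    HAL_TCL_DESC_TYPE_BUFFER) |
+       FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE,
+    HAL_TCL_ENCAP_TYPE_NATIVE_WIFI);
+ cmd->info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, data_len);
+}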
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+enum hal_tcl_gse_ctrl {
+ HAL_TCL_GSE_CTRL_RD_STAT,
+ HAL_TCL_GSE_CTRL_SRCH_DIS,
+ HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_ALL,
+ HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ * Report or Read statistics
+ * srch_dis
+ * Search disable. Report only Hash.
+ * wr_bk_single
+ * Write Back single entry
+ * wr_bk_all
+ * Write Back entire cache entry
+ * inval_single
+ * Invalidate single cache entry
+ * inval_all
+ * Invalidate entire cache
+ * wr_bk_inval_single
+ * Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ * Write back and invalidate entire cache
+ * clr_stat_single
+ * Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_gse_cmd {
+ u32 ctrl_buf_addr_lo;
+ u32 info0;
+ u32 meta_data[2];
+ u32 rsvd0[2];
+ u32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * Selects the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * status_destination_ring_id
+ * TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ * Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_status_ring {
+ u32 info0;
+ u32 msdu_byte_count;
+ u32 msdu_timestamp;
+ u32 meta_data[2];
+ u32 info1;
+ u32 rsvd0;
+ u32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * Selects the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * cache_op_res
+ * Cache operation result. Values are defined in enum
+ * %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of Entry and MSDU byte count for entry 1.
+ *
+ * hash_indx
+ * Hash value of the entry in case the search failed or is disabled.
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ u32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ u32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of 32-bit Toeplitz-LFSR hash for
+ * the data transfer. In case of gather, the field in the first
+ * source ring entry of the gather copy cycle is taken into account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
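+
+/* Illustrative sketch (hypothetical helper): populating a CE source
+ * descriptor for a 40-bit DMA address; 'paddr' and 'len' are
+ * caller-provided.
+ */
+static inline void ath11k_dbg_ce_src_fill(struct hal_ce_srng_src_desc *desc,
+       dma_addr_t paddr, u16 len)
+{
+ desc->buffer_addr_low = lower_32_bits(paddr);
+ desc->buffer_addr_info =
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI, upper_32_bits(paddr)) |
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+}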
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(7, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ u32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ u32 toeplitz_hash0;
+ u32 toeplitz_hash1;
+ u32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of all the Lengths of the source descriptor in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
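+
+/* Usage sketch (hypothetical): after a copy completes, the transferred
+ * length can be read back from the status descriptor, e.g.
+ *
+ * len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, status->flags);
+ */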
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(27, 16)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ u32 info0;
+ u32 tsf;
+} __packed;
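+
+/* Illustrative sketch (hypothetical helper): unpacking the tx rate fields;
+ * only meaningful when the VALID bit is set.
+ */
+static inline bool
+ath11k_dbg_tx_rate_unpack(const struct hal_tx_rate_stats *stats,
+     enum hal_tx_rate_stats_bw *bw, u8 *mcs)
+{
+ if (!(stats->info0 & HAL_TX_RATE_STATS_INFO0_VALID))
+  return false;
+
+ *bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, stats->info0);
+ *mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, stats->info0);
+ return true;
+}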
+
+struct hal_wbm_link_desc {
+ struct ath11k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME BIT(17)
+
+struct hal_wbm_release_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ u32 info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on the wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ * Field only valid when the field return_buffer_manager in
+ * Released_buff_or_desc_addr_info indicates:
+ * WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ * Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates that type of buffer or
+ * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * first_msdu_index
+ * Field only valid for the bm_action release_msdu_list. The index of the
+ * first MSDU in an MSDU link descriptor all belonging to the same MPDU.
+ *
+ * tqm_release_reason
+ * Field only valid when Release_source_module is set to release_source_TQM
+ * Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ * Indicates why rxdma/reo pushed the frame to this ring and values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Field only valid when 'rxdma_push_reason' set to 'error_detected'.
+ * Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ * Field only valid when 'reo_push_reason' set to 'error_detected'. Values
+ * are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity, or
+ * when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ * The value in this field is equal to tqm_cmd_number in TQM command. It is
+ * used to correlate the status with TQM commands. Only valid when
+ * release_source_module is TQM.
+ *
+ * transmit_count
+ * The number of times the frame has been transmitted, valid only when
+ * release source is TQM.
+ *
+ * ack_frame_rssi
+ * This field is only valid when the source is TQM. If this frame is
+ * removed as the result of the reception of an ACK or BA, this field
+ * indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ * This is set when WBM got a 'release_msdu_list' command from TQM and
+ * the return buffer manager is not WBM. WBM will then de-aggregate
+ * all MSDUs and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the first MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * last_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the last MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ * Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ * Field only valid when SW_release_details_valid is set.
+ * This is the Buffer_timestamp field, a timestamp in units
+ * of 1024 us.
+ *
+ * struct hal_tx_rate_stats rate_stats
+ * Details of the transmit rate used for this frame.
+ *
+ * sw_peer_id
+ * tid
+ * Field only valid when Release_source_module is set to
+ * release_source_TQM
+ *
+ * 1) Release of msdu buffer due to drop_frame = 1. Flow is
+ * not fetched and hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 2) Release of msdu buffer due to Flow is not fetched and
+ * hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 3) Release of msdu link due to remove_mpdu or acked_mpdu
+ * command.
+ *
+ * buffer_or_desc_type = e_num 1 (msdu_link_descriptor)
+ * tqm_release_reason can be: e_num 1 tqm_rr_rem_cmd_rem,
+ * e_num 2 tqm_rr_rem_cmd_tx, e_num 3 tqm_rr_rem_cmd_notx,
+ * e_num 4 tqm_rr_rem_cmd_aged
+ *
+ * This field represents the TID from the TX_MSDU_FLOW
+ * descriptor or TX_MPDU_QUEUE descriptor
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
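+
+/* Illustrative sketch (hypothetical helper): a TQM release reason is only
+ * meaningful when TQM was the releasing module.
+ */
+static inline bool
+ath11k_dbg_wbm_tqm_rel(const struct hal_wbm_release_ring *desc, u32 *reason)
+{
+ if (FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0) !=
+     HAL_WBM_REL_SRC_MODULE_TQM)
+  return false;
+
+ *reason = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, desc->info0);
+ return true;
+}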
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
+
+struct hal_wbm_buffer_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+};
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ u32 info0;
+} __packed;
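+
+/* Example (illustrative): packing and unpacking info0 with the masks above.
+ * This mirrors what ath11k_hal_reo_set_desc_hdr() in hal_rx.c does later in
+ * this patch; the local variables are placeholders.
+ *
+ *	struct hal_desc_header hdr;
+ *	u8 owner;
+ *
+ *	hdr.info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, HAL_DESC_REO_OWNED) |
+ *		    FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE,
+ *			       HAL_DESC_REO_QUEUE_DESC);
+ *	owner = FIELD_GET(HAL_DESC_HDR_INFO0_OWNER, hdr.info0);
+ */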
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath11k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ u32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ u32 rx_queue_num;
+ u32 info0;
+ u32 info1;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 next_aging_queue[2];
+ u32 prev_aging_queue[2];
+ u32 rx_bitmap[8];
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 processed_mpdus;
+ u32 processed_msdus;
+ u32 processed_total_bytes;
+ u32 info5;
+ u32 rsvd[3];
+ struct hal_rx_reo_queue_ext ext_desc[0];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from Reo when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value of 255 means a 256-bit bitmap, 63 means a 64-bit bitmap, and 0
+ * means a non-BA session with a window size of 0. These 3 values are the
+ * main values validated, but other values should work as well.
+ *
+ * A BA window size of 0 (=> one frame entry bitmap) means that there is
+ * no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ * A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * Sequence number in next field is valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet.
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
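+
+/* Worked example of the ba_window_size mapping above: a negotiated BA window
+ * of 64 falls in the 1 - 105 range, so exactly one rx_reo_queue_ext follows
+ * the base descriptor and the total descriptor size is
+ * sizeof(struct hal_rx_reo_queue) + sizeof(struct hal_rx_reo_queue_ext).
+ * ath11k_hal_reo_qdesc_size() in hal_rx.c implements this mapping.
+ */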
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 info0;
+ u32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ u32 info0;
+ u32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to the value of the REO command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum hal_reo_exec_status.
+ */
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 rx_bitmap[8];
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 num_mpdu_frames;
+ u32 num_msdu_frames;
+ u32 total_bytes;
+ u32 info4;
+ u32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session, this changes whenever
+ * window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved more than 2K.
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 rsvd0[20];
+ u32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptors released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(23, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 rsvd0[17];
+ u32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#endif /* ATH11K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
new file mode 100644
index 000000000000..9e0f8064e427
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+
+static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset(&desc->cache_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->cache_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+ cmd->addr_hi);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+ desc->info0 |=
+ FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+ avail_slot);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+ desc->info1 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+ cmd->rx_queue_num) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+ FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+ FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+ cmd->ba_window_size - 1) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+ FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath11k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
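+
+/* Usage sketch (illustrative, not called from this patch): issuing a
+ * GET_QUEUE_STATS command on the REO command ring. qdesc_paddr is a
+ * placeholder for the DMA address of a REO queue descriptor.
+ *
+ *	struct ath11k_hal_reo_cmd cmd = {};
+ *	int cmd_num;
+ *
+ *	cmd.addr_lo = lower_32_bits(qdesc_paddr);
+ *	cmd.addr_hi = upper_32_bits(qdesc_paddr);
+ *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ *	cmd_num = ath11k_hal_reo_cmd_send(ab, srng,
+ *					  HAL_REO_CMD_GET_QUEUE_STATS, &cmd);
+ *	(cmd_num correlates with status_num in the matching status TLV)
+ */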
+
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
+ binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
+
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+
+ *paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+ *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
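+
+/* Round-trip sketch (illustrative): the setter and getter above are inverses
+ * for the fields they cover, so a buffer posted with _set() can later be
+ * identified again with _get(). All locals are placeholders.
+ *
+ *	dma_addr_t paddr_out;
+ *	u32 cookie_out;
+ *	u8 rbm_out;
+ *
+ *	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie,
+ *					HAL_RX_BUF_RBM_SW3_BM);
+ *	ath11k_hal_rx_buf_addr_info_get(desc, &paddr_out, &cookie_out,
+ *					&rbm_out);
+ *	(paddr_out == paddr, cookie_out == cookie, rbm_out == SW3_BM)
+ */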
+
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
+ struct hal_rx_msdu_details *msdu;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu->buf_addr_info.info1);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu->buf_addr_info.info0)) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu->buf_addr_info.info1);
+ msdu_cookies++;
+ }
+}
+
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+ desc->info0);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath11k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath11k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
+
+ return 0;
+}
+
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+
+ type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ wbm_desc->info0);
+ /* We expect only WBM_REL buffer type */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ wbm_desc->info0);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ wbm_desc->buf_addr_info.info1);
+ rel_info->err_rel_src = rel_src;
+ if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
+ wbm_desc->info0);
+ } else {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
+ wbm_desc->info0);
+ }
+
+ rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+ wbm_desc->info2);
+ rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+ wbm_desc->info2);
+ return 0;
+}
+
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct ath11k_buffer_addr *buff_addr = desc;
+
+ *paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
+
+ *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
+}
+
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *dst_desc = desc;
+ struct hal_wbm_release_ring *src_desc = link_desc;
+
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ HAL_WBM_REL_SRC_MODULE_SW) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+}
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "Queue stats status:\n");
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "ssn %ld cur_idx %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+ desc->info0),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+ desc->info0));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+ desc->info1),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+ desc->info1));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+ desc->info2));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+ desc->info3),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+ desc->info3));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+ desc->info4));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "looping count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+ desc->info5));
+}
+
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *hdr;
+
+ hdr = (struct hal_reo_status_hdr *)tlv->value;
+ *status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
+
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+}
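+
+/* Usage sketch (illustrative): correlating a status descriptor with the
+ * command that produced it, per the hal_reo_status_hdr comment in
+ * hal_desc.h. reo_desc points at a TLV taken from the status ring and the
+ * command-context lookup is a hypothetical caller responsibility.
+ *
+ *	u8 exec_status;
+ *	int cmd_num;
+ *
+ *	cmd_num = ath11k_hal_reo_process_status(reo_desc, &exec_status);
+ *	if (exec_status == HAL_REO_EXEC_STATUS_SUCCESS)
+ *		(look up the pending command context keyed by cmd_num)
+ */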
+
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+ status->u.flush_queue.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
+ desc->info0);
+}
+
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.flush_cache.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.flush_cache.err_code =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
+ desc->info0);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
+ desc->info0);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
+ desc->info0);
+}
+
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.unblock_cache.err_detected =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.unblock_cache.unblock_type =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
+ desc->info0);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.timeout_list.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.timeout_list.list_empty =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
+ desc->info0);
+
+ status->u.timeout_list.release_desc_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
+ desc->info1);
+ status->u.timeout_list.fwd_buf_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
+ desc->info1);
+}
+
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+ desc->info0);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+ desc->info1);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+ desc->info2);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+ desc->info3);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+ desc->info4);
+}
+
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->info0);
+}
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
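+
+/* Example: for a QoS TID with a negotiated BA window of 256, the mapping
+ * above selects num_ext_desc = 3, so the returned size is
+ * sizeof(struct hal_rx_reo_queue) +
+ * 3 * sizeof(struct hal_rx_reo_queue_ext).
+ */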
+
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq)
+{
+ struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 =
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
+
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for max BA
+ * window size for all QOS TIDs so that same descriptor can be used
+ * later when ADDBA request is received. This should be changed to
+ * allocate HW queue descriptors based on BA window size being
+ * negotiated (0 for non BA cases), and reallocate when BA window
+ * size changes and also send WMI message to FW to change the REO
+ * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_REO_CMD);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 =
+ FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
+ entry += entry_size;
+ }
+}
+
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+}
+
+static enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 tlv_tag, u8 *tlv_data)
+{
+ u32 info0, info1;
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ ppdu_info->ppdu_id =
+ FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info0 = __le32_to_cpu(eu_stats->info0);
+ info1 = __le32_to_cpu(eu_stats->info1);
+
+ ppdu_info->tid =
+ ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
+ __le32_to_cpu(eu_stats->info6))) - 1;
+ ppdu_info->tcp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->udp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->other_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->tcp_ack_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->preamble_type =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
+ ppdu_info->num_mpdu_fcs_ok =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
+ info1);
+ ppdu_info->num_mpdu_fcs_err =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG: {
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+
+ info0 = __le32_to_cpu(ht_sig->info0);
+ info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
+ ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
+ ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
+ info1);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
+ ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
+
+ switch (ppdu_info->mcs) {
+ case 0 ... 7:
+ ppdu_info->nss = 1;
+ break;
+ case 8 ... 15:
+ ppdu_info->nss = 2;
+ break;
+ case 16 ... 23:
+ ppdu_info->nss = 3;
+ break;
+ case 24 ... 31:
+ ppdu_info->nss = 4;
+ break;
+ }
+
+ if (ppdu_info->nss > 1)
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_B: {
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
+ __le32_to_cpu(lsigb->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_A: {
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
+ __le32_to_cpu(lsiga->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_VHT_SIG_A: {
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts;
+ u32 group_id;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
+ info0);
+ ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
+ info1);
+ gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
+ info1);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
+ nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
+ info0);
+ ppdu_info->beamformed = info1 &
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
+ group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
+ info0);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type =
+ HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_SU: {
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+ u32 nsts, cp_ltf, dcm;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
+ info0);
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
+ info0);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
+ ppdu_info->beamformed = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
+ dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+ nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ if (dcm && ppdu_info->is_stbc)
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ else
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->nss = nsts + 1;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_MU_DL: {
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+
+ u32 cp_ltf;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
+ info0);
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B1_MU: {
+ /* TODO: Check if resource unit(RU) allocation stats
+ * are required
+ */
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_MU: {
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_OFDMA: {
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->beamformed =
+ info0 &
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
+ info0);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ }
+ case HAL_PHYRX_RSSI_LEGACY: {
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+
+		/* TODO: Note that the combined RSSI will not be accurate in
+		 * the MU case; the RSSI in MU needs to be retrieved from the
+		 * PHYRX_OTHER_RECEIVE_INFO TLV.
+		 */
+ ppdu_info->rssi_comb =
+ FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB,
+ __le32_to_cpu(rssi->info0));
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+ u16 peer_id;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->info0));
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+ ppdu_info->rx_duration =
+ FIELD_GET(HAL_RX_PPDU_END_DURATION,
+ __le32_to_cpu(ppdu_rx_duration->info0));
+ break;
+ }
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb)
+{
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr += sizeof(*tlv);
+
+		/* The actual length of PPDU_END is the combined length of the many
+		 * PHY TLVs that follow. Skip the TLV header and the
+		 * rx_rxpcu_classification_overview that follows it to get to the
+		 * next TLV.
+		 */
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
+ tlv_tag, ptr);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
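+		/* Worked example, assuming HAL_TLV_ALIGN is 4: a TLV with
+		 * tlv_len 13 advances ptr by sizeof(*tlv) + 13 and is then
+		 * rounded up to the next 4-byte boundary, so the next
+		 * iteration reads an aligned TLV header.
+		 */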
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
+
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie, void **pp_buf_addr,
+ u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
new file mode 100644
index 000000000000..bb022c781c48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_RX_H
+#define ATH11K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[0];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
+#define HAL_TLV_STATUS_PPDU_DONE 1
+#define HAL_TLV_STATUS_BUF_DONE 2
+#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 is_stbc;
+ u8 gi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u8 tid;
+ u8 reception_type;
+ u64 rx_duration;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le32 rsvd2[11];
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(14)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[35];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+struct hal_rx_mpdu_info {
+ __le32 rsvd0;
+ __le32 info0;
+ __le32 rsvd1[21];
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+				       struct hal_reo_status *status);
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager);
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info,
+ u32 *msdu_cnt);
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb);
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
new file mode 100644
index 000000000000..e4aa7e8a1284
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * DSCP TID
+ * 000000 0
+ * 001000 1
+ * 010000 2
+ * 011000 3
+ * 100000 4
+ * 101000 5
+ * 110000 6
+ * 111000 7
+ */
+static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
+
+ tcl_cmd->buf_addr_info.info0 =
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
+ tcl_cmd->buf_addr_info.info1 =
+ FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+ tcl_cmd->buf_addr_info.info1 |=
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
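+	/* The DMA address is split across the two info words: info0 holds
+	 * the low 32 bits and info1 the remaining MSBs (HAL_ADDR_MSB_REG_SHIFT
+	 * is assumed to be 32 here). E.g. paddr 0x123456780 becomes
+	 * info0 addr 0x23456780 and info1 addr 0x1.
+	 */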
+
+ tcl_cmd->info0 =
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
+ ti->encrypt_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
+ ti->search_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
+ ti->addr_search_flags) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
+ ti->meta_data_flags);
+
+ tcl_cmd->info1 = ti->flags0 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+ tcl_cmd->info2 = ti->flags1 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+ tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+ ti->dscp_tid_tbl_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_hash);
+ tcl_cmd->info4 = 0;
+}
+
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+ int i;
+ u32 value;
+ int cnt = 0;
+
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+	/* Each DSCP-TID mapping takes three bits, so eight mappings
+	 * (three bytes) are packed per iteration.
+	 */
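+	/* E.g. iteration i = 0 packs dscp_tid_map[0..7] (8 entries x 3 bits
+	 * = 24 bits) into the low three bytes of 'value' on a little-endian
+	 * host; only those three bytes are appended to hw_map_val, so the
+	 * 3-bit fields butt together with no padding between iterations.
+	 */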
+ for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+ value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+ dscp_tid_map[i]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+ dscp_tid_map[i + 1]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+ dscp_tid_map[i + 2]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+ dscp_tid_map[i + 3]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+ dscp_tid_map[i + 4]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+ dscp_tid_map[i + 5]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+ dscp_tid_map[i + 6]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+ dscp_tid_map[i + 7]);
+ memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
+ cnt += 3;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, entry_size;
+ u8 *desc;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ desc = (u8 *)params.ring_base_vaddr;
+
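+	/* Pre-write a TCL_DATA_CMD TLV header into every ring entry so the
+	 * TX path only needs to fill in the command body at enqueue time.
+	 */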
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)desc;
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+ FIELD_PREP(HAL_TLV_HDR_LEN,
+ sizeof(struct hal_tcl_data_cmd));
+ desc += entry_size;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
new file mode 100644
index 000000000000..ce48a61bfb66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_TX_H
+#define ATH11K_HAL_TX_H
+
+#include "hal_desc.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 dscp_tid_tbl_idx;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti);
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd);
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
new file mode 100644
index 000000000000..8f54f58b83e6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "ahb.h"
+#include "debug.h"
+
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH11K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+}
+
+static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_hdr *hdr;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+ FIELD_PREP(HTC_HDR_PAYLOADLEN,
+ (skb->len - sizeof(*hdr))) |
+ FIELD_PREP(HTC_HDR_FLAGS,
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
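+	/* For illustration: eid 1 with a 100-byte payload and the
+	 * NEED_CREDIT_UPDATE flag packs as htc_info 0x00640101 (payload
+	 * length in bits 31:16, flags in 15:8, endpoint id in 7:0).
+	 */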
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath11k_htc_send(struct ath11k_htc *htc,
+ enum ath11k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath11k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath11k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
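+	/* Credit arithmetic, for illustration: with a target credit size of
+	 * e.g. 1792 bytes (an assumed value; the real size comes from the
+	 * HTC ready message), a 2000-byte frame costs
+	 * DIV_ROUND_UP(2000, 1792) = 2 credits.
+	 */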
+
+ ath11k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+ return ret;
+}
+
+static void
+ath11k_htc_process_credit_report(struct ath11k_htc *htc,
+ const struct ath11k_htc_credit_report *report,
+ int len,
+ enum ath11k_htc_ep_id eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+		ath11k_warn(ab, "Uneven credit report len %d\n", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH11K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath11k_htc_ep_id src_eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ int status = 0;
+ struct ath11k_htc_record *record;
+ size_t len;
+
+ while (length > 0) {
+ record = (struct ath11k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath11k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
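+		/* Example record layout: a credits record carries hdr.id
+		 * ATH11K_HTC_RECORD_CREDITS and an hdr.len that is a multiple
+		 * of the 4-byte credit report, with one report per endpoint
+		 * immediately after the header.
+		 */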
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_hdr *hdr;
+ struct ath11k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
+
+ if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
+ ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
+ ATH11K_HTC_FLAG_TRAILER_PRESENT;
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
+ min_len = sizeof(struct ath11k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath11k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath11k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH11K_HTC_EP_0) {
+ struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
+
+ switch (FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)) {
+ case ATH11K_HTC_MSG_READY_ID:
+ case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+				/* this is a fatal error, the target should not
+				 * be sending unsolicited messages on ep 0
+				 */
+ ath11k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH11K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ default:
+ ath11k_warn(ab, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath11k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath11k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH11K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH11K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH11K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH11K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH11K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH11K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_ep *ep;
+ int i;
+
+ for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
+ u16 service_id)
+{
+ u8 i, allocation = 0;
+
+ for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (htc->service_alloc_table[i].service_id == service_id) {
+ allocation =
+ htc->service_alloc_table[i].credit_allocation;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_svc_tx_credits *serv_entry;
+ u32 svc_id[] = {
+ ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_wait_target(struct ath11k_htc *htc)
+{
+ int i, status = 0;
+ struct ath11k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath11k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+		ath11k_warn(ab, "failed to receive control response completion, polling...\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath11k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+ credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+ ready->id_credit_count);
+ credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+ if (message_id != ATH11K_HTC_MSG_READY_ID) {
+ ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath11k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ ath11k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_conn_svc *req_msg;
+ struct ath11k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
+ struct ath11k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH11K_HTC_EP_0;
+ max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath11k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb) {
+ ath11k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath11k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+ req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+ conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+ service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+ resp_msg->msg_svc_id);
+
+ if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+		ath11k_err(ab, "Invalid resp message ID 0x%x\n", message_id);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%lx, assigned ep: 0x%lx\n",
+ htc_service_name(service_id),
+ FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+ FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+ conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+ resp_msg->flags_len);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
+		ath11k_err(ab, "HTC Service %s connect request failed: 0x%x\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
+ HTC_SVC_RESP_MSG_ENDPOINTID,
+ resp_msg->flags_len);
+
+ max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+setup:
+
+ if (assigned_eid >= ATH11K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath11k_ahb_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath11k_htc_start(struct ath11k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_setup_complete_extended *msg;
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_init(struct ath11k_base *ab)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath11k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = 3;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
new file mode 100644
index 000000000000..f0a3387567ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HTC_H
+#define ATH11K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath11k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath11k_htc_tx_flags {
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath11k_htc_rx_flags {
+ ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath11k_htc_hdr {
+ u32 htc_info;
+ u32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath11k_htc_msg_id {
+ ATH11K_HTC_MSG_READY_ID = 1,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath11k_htc_version {
+ ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath11k_htc_conn_flags {
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+};
+
+enum ath11k_htc_conn_svc_status {
+ ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath11k_htc_ready {
+ u32 id_credit_count;
+ u32 size_ep;
+} __packed;
+
+struct ath11k_htc_ready_extended {
+ struct ath11k_htc_ready base;
+ u32 ver_bundle;
+} __packed;
+
+struct ath11k_htc_conn_svc {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed;
+
+struct ath11k_htc_conn_svc_resp {
+ u32 msg_svc_id;
+ u32 flags_len;
+ u32 svc_meta_pad;
+} __packed;
+
+struct ath11k_htc_setup_complete_extended {
+ u32 msg_id;
+ u32 flags;
+ u32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath11k_htc_msg {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed __aligned(4);
+
+enum ath11k_htc_record_id {
+ ATH11K_HTC_RECORD_NULL = 0,
+ ATH11K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath11k_htc_record_hdr {
+ u8 id; /* @enum ath11k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_credit_report {
+ u8 eid; /* @enum ath11k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_record {
+ struct ath11k_htc_record_hdr hdr;
+ union {
+ struct ath11k_htc_credit_report credit_report[0];
+		u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/* Note: the trailer offset is dynamic, depending on the payload length;
+ * this is only a struct layout draft.
+ */
+struct ath11k_htc_frame {
+ struct ath11k_htc_hdr hdr;
+ union {
+ struct ath11k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath11k_htc_record trailer[0];
+} __packed __aligned(4);
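+/* Sketch of the resulting frame layout: when HTC_HDR_FLAGS carries
+ * ATH11K_HTC_FLAG_TRAILER_PRESENT, the trailer occupies the last
+ * HTC_HDR_CONTROLBYTES0 bytes of the payload, i.e. it starts at
+ * sizeof(hdr) + payload_len - trailer_len, as parsed in
+ * ath11k_htc_rx_completion_handler().
+ */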
+
+enum ath11k_htc_svc_gid {
+ ATH11K_HTC_SVC_GRP_RSVD = 0,
+ ATH11K_HTC_SVC_GRP_WMI = 1,
+ ATH11K_HTC_SVC_GRP_NMI = 2,
+ ATH11K_HTC_SVC_GRP_HTT = 3,
+ ATH11K_HTC_SVC_GRP_CFG = 4,
+ ATH11K_HTC_SVC_GRP_IPA = 5,
+ ATH11K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH11K_HTC_SVC_GRP_TEST = 254,
+ ATH11K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath11k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
+
+ ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
+
+ ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
+ ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
+
+ ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
+ ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
+ ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath11k_htc_ep_id {
+ ATH11K_HTC_EP_UNUSED = -1,
+ ATH11K_HTC_EP_0 = 0,
+ ATH11K_HTC_EP_1 = 1,
+ ATH11K_HTC_EP_2,
+ ATH11K_HTC_EP_3,
+ ATH11K_HTC_EP_4,
+ ATH11K_HTC_EP_5,
+ ATH11K_HTC_EP_6,
+ ATH11K_HTC_EP_7,
+ ATH11K_HTC_EP_8,
+ ATH11K_HTC_EP_COUNT,
+};
+
+struct ath11k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath11k_base *ar);
+};
+
+struct ath11k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath11k_base *);
+};
+
+/* service connection information */
+struct ath11k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath11k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath11k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH11K_HTC_MAX_LEN 4096
+#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath11k_htc_hdr))
+#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath11k_htc_ep {
+ struct ath11k_htc *htc;
+ enum ath11k_htc_ep_id eid;
+ enum ath11k_htc_svc_id service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath11k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath11k_htc {
+ struct ath11k_base *ab;
+ struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath11k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath11k_htc_svc_tx_credits
+ service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath11k_htc_init(struct ath11k_base *ar);
+int ath11k_htc_wait_target(struct ath11k_htc *htc);
+int ath11k_htc_start(struct ath11k_htc *htc);
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp);
+int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size);
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ar,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
new file mode 100644
index 000000000000..dd39333ec0ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HW_H
+#define ATH11K_HW_H
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS (16 + 1)
+
+#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS 512
+
+#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
+ 4 * TARGET_NUM_VDEVS + 8)
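+/* For illustration: in DBS mode TARGET_NUM_PEERS(DBS) expands to
+ * 2 * (512 + 17) = 1058 peers, so TARGET_NUM_TIDS(DBS) is
+ * 2 * 1058 + 4 * 17 + 8 = 2192.
+ */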
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+
+#define ATH11K_HW_MAX_QUEUES 4
+
+#define ATH11K_HW_RATECODE_CCK_SHORT_PREAM_MASK  0x4
+
+#define ATH11K_FW_DIR "ath11k"
+
+/* IPQ8074 definitions */
+#define IPQ8074_FW_DIR "IPQ8074"
+#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
+#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
+
+#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
+#define ATH11K_BOARD_API2_FILE "board-2.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
+#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+
+enum ath11k_hw_rate_cck {
+ ATH11K_HW_RATE_CCK_LP_11M = 0,
+ ATH11K_HW_RATE_CCK_LP_5_5M,
+ ATH11K_HW_RATE_CCK_LP_2M,
+ ATH11K_HW_RATE_CCK_LP_1M,
+ ATH11K_HW_RATE_CCK_SP_11M,
+ ATH11K_HW_RATE_CCK_SP_5_5M,
+ ATH11K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath11k_hw_rate_ofdm {
+ ATH11K_HW_RATE_OFDM_48M = 0,
+ ATH11K_HW_RATE_OFDM_24M,
+ ATH11K_HW_RATE_OFDM_12M,
+ ATH11K_HW_RATE_OFDM_6M,
+ ATH11K_HW_RATE_OFDM_54M,
+ ATH11K_HW_RATE_OFDM_36M,
+ ATH11K_HW_RATE_OFDM_18M,
+ ATH11K_HW_RATE_OFDM_9M,
+};
+
+struct ath11k_hw_params {
+ const char *name;
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_size;
+ } fw;
+};
+
+struct ath11k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath11k_bd_ie_board_type {
+ ATH11K_BD_IE_BOARD_NAME = 0,
+ ATH11K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath11k_bd_ie_type {
+ /* contains sub IEs of enum ath11k_bd_ie_board_type */
+ ATH11K_BD_IE_BOARD = 0,
+ ATH11K_BD_IE_BOARD_EXT = 1,
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
new file mode 100644
index 000000000000..6640662f5ede
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -0,0 +1,5907 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "testmode.h"
+#include "peer.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath11k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath11k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+};
+
+static struct ieee80211_rate ath11k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
+};
+
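+/* Map (band, channel width) to the preferred WMI phy mode; width/band
+ * combinations the hardware does not support resolve to MODE_UNKNOWN.
+ */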
+static const int
+ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+};
+
+const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
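+/* ath11k_legacy_rates starts with the four CCK entries, which are valid
+ * only on 2 GHz; the "a" (5 GHz) table below therefore skips them and
+ * carries OFDM rates only.
+ */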
+#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath11k_g_rates ath11k_legacy_rates
+#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
+#define ath11k_a_rates (ath11k_legacy_rates + 4)
+#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
+
+#define ATH11K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
+
+static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case ATH11K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH11K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH11K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH11K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* Default to OFDM rates */
+ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath11k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath11k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath11k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
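+/* Population count of a chainmask, e.g. 0x5 (chains 0 and 2) yields 2;
+ * this is equivalent to the kernel's hweight32().
+ */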
+static int get_num_chains(u32 mask)
+{
+ int num_chains = 0;
+
+ while (mask) {
+ if (mask & BIT(0))
+ num_chains++;
+ mask >>= 1;
+ }
+
+ return num_chains;
+}
+
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath11k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
+{
+/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath11k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
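+/* Convert a mac80211 bitrate (100 kbps units) into the WMI rate field
+ * (500 kbps units, with BIT(7) flagging CCK). Example: 55 (5.5 Mbps)
+ * gives DIV_ROUND_UP(55, 5) = 11, encoded as 11 | BIT(7) = 0x8b.
+ */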
+static u8 ath11k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath11k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif_iter *arvif_iter = data;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath11k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif)
+ return NULL;
+
+ return arvif_iter.arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return pdev->ar;
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->vdev_stop_status.stop_in_progress &&
+ ar->vdev_stop_status.vdev_id == vdev_id) {
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_pdev_caps_update(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
+ * But since the value received in svcrdy is the same as
+ * hw_max_tx_power, keep ar->min_tx_power at 0 for now until this
+ * is fixed in firmware.
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath11k_mac_txpower_recalc(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* txpwr is set as 2 units per dBm in FW */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
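+ /* e.g. a requested limit of 20 dBm becomes 40 firmware units here */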
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH11K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+{
+ int ret = 0;
+
+ ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = 0;
+
+ /* mac80211 requires this op to be present, which is why this is an
+ * empty function; it can be extended when required.
+ */
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* TODO: Handle configuration changes as appropriate */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath11k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+
+ kfree_skb(bcn);
+
+ if (ret)
+ ath11k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void ath11k_control_beaconing(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
+ if (rsnie || wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
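+ /* Example: IEEE80211_HT_MAX_AMPDU_FACTOR is 13, so an ampdu_factor
+ * of 3 yields peer_max_mpdu = (1 << 16) - 1 = 65535 octets.
+ */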
+
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
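+/* Expand one 2-bit VHT MCS map field (0 = MCS 0-7, 1 = MCS 0-8,
+ * 2 = MCS 0-9, 3 = not supported) into a bitmap of usable MCS indices;
+ * a field value of 2 becomes BIT(10) - 1 = 0x3ff (MCS 0 through 9 set).
+ */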
+static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
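+/* Clamp the peer's per-NSS TX MCS fields by the user's bitrate mask. For
+ * instance, if the peer advertises MCS 0-9 on a stream but the mask allows
+ * only MCS 0-7, the masked bitmap is 0xff, fls(0xff) - 1 = 7, and the
+ * field is rewritten to IEEE80211_VHT_MCS_SUPPORT_0_7.
+ */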
+static u16
+ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+ /* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by
+ * default. These rates are not supported by the 11ac standard, so
+ * explicitly disable VHT MCS rates 10 and 11 in 11ac mode.
+ */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ /* TODO: rxnss_override */
+}
+
+static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u16 v;
+
+ if (!he_cap->has_he)
+ return;
+
+ arg->he_flag = true;
+
+ memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+ sizeof(arg->peer_he_cap_macinfo));
+ memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+ sizeof(arg->peer_he_cap_phyinfo));
+ memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation,
+ sizeof(arg->peer_he_ops));
+
+ /* the topmost byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
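+ /* The loop above walks the PPE thresholds field: after the 7-bit
+ * header (3-bit NSS count plus 4-bit RU bitmask), each present
+ * (NSS, RU) pair carries a 6-bit PPET16/PPET8 value, packed into the
+ * per-NSS ppet16_ppet8_ru3_ru0 word at a 6-bit offset per RU.
+ */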
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ /* fall through */
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ap_ps_params params;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ params.vdev_id = arvif->vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ params.value = uapsd;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ params.value = max_sp;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ /* TODO revisit during testing */
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ params.param, arvif->vdev_id, ret);
+ return ret;
+}
+
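+/* supp_rates is indexed like ath11k_legacy_rates, so shifting out the four
+ * CCK bits leaves a non-zero value exactly when the station supports at
+ * least one OFDM rate.
+ */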
+static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->he_cap.has_he) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath11k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ /* Check HE first */
+ if (sta->he_cap.has_he) {
+ phymode = ath11k_mac_get_phymode_he(ar, sta);
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath11k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg,
+ bool reassoc)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_smps(sta, arg);
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath11k_smps_map))
+ return -EINVAL;
+
+ return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath11k_smps_map[smps]);
+}
+
+static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct peer_assoc_params peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = bss_conf->aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+ /* Authorize BSS Peer */
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+
+ ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
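+/* Build a WMI rate code for a legacy bitrate. ATH11K_HW_RATE_CODE is
+ * defined in hw.h elsewhere in this patch and presumably packs the fields
+ * ath10k-style as (preamble << 8) | (nss << 5) | rate, so CCK 5.5 Mbps
+ * with long preamble would encode as
+ * (WMI_RATE_PREAMBLE_CCK << 8) | ATH11K_HW_RATE_CCK_LP_5_5M.
+ */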
+static int ath11k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
+ if (ath11k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath11k_legacy_rates[i].hw_value;
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret = 0;
+ u8 rateidx;
+ u32 rate;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath11k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc)
+ ath11k_bss_assoc(hw, vif, info);
+ else
+ ath11k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath11k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath11k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath11k_legacy_rates[rateidx].bitrate;
+ hw_value = ath11k_legacy_rates[rateidx].hw_value;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath11k_mac_vif_chan(arvif->vif, &def))
+ ath11k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ if (info->twt_requester || info->twt_responder)
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
+ else
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH11K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ /* fall through */
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath11k_scan_stop(struct ath11k *ar)
+{
+ struct scan_cancel_param arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH11K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab,
+ "failed to receive scan abort comple: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desirable to clean up the scan state anyway. Firmware may have
+ * just dropped the scan completion event delivery due to the
+ * transport pipe overflowing with data, and/or it can recover on
+ * its own before the next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH11K_SCAN_IDLE)
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath11k_scan_abort(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ /* This can happen if timeout worker kicked in and called
+ * abortion while scan completion was being processed.
+ */
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ ar->scan.state = ATH11K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_scan(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ /* zero-init: the exit path can kfree(arg.extraie.ptr) on early errors */
+ struct scan_req_params arg = {};
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+ /* proceed without the extra IEs if the allocation fails */
+ if (arg.extraie.ptr) {
+ arg.extraie.len = req->ie_len;
+ memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ }
+ }
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++) {
+ arg.ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid,
+ req->ssids[i].ssid_len);
+ }
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a 200ms margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH11K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath11k_install_key(struct ath11k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath11k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (cmd == DISABLE_KEY) {
+ /* TODO: Check if FW expects value other than NONE for del */
+ /* arg.key_cipher = WMI_CIPHER_NONE; */
+ arg.key_len = 0;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+install:
+ ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
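+/* Remove all keys installed for a peer; the loop keeps going on errors
+ * but preserves the first errno so the caller sees the earliest failure.
+ */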
+static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath11k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+	/* the peer should not disappear mid-way (unless the firmware goes
+	 * awry) since we already hold conf_mutex. we just make sure it is
+	 * there now.
+	 */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (!peer)
+ /* impossible unless FW goes crazy */
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
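+	/* count the VHT MCS rates enabled across all NSS streams */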
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
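+	/* a fixed rate requires exactly one MCS bit set for a single NSS;
+	 * the last stream with a single bit set wins
+	 */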
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+		ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+		   "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates\n",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath11k_station_assoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ int ret = 0;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_vht_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+
+ /* If single VHT rate is configured (by set_bitrate_mask()),
+	 * peer_assoc will disable VHT. Re-enable it here via a peer-specific
+	 * fixed rate parameter.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_station_disassoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath11k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
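+	/* snapshot the pending updates and clear the change mask under
+	 * data_lock; any update arriving afterwards re-queues this work
+	 */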
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+		/* peer_assoc_prepare will reject vht rates in
+		 * bitrate_mask if they are not available in range format and
+		 * sets the vht tx_rateset as unsupported. So setting
+		 * multiple VHT MCS rates (e.g. MCS 4,5,6) per peer is not
+		 * supported here. A single VHT rate in the mask can,
+		 * however, be set as a per-peer fixed rate. Note that even
+		 * if HT rates are also configured in the bitrate mask, the
+		 * device will not switch to them while a per-peer fixed
+		 * rate is set.
+		 * TODO: Check RATEMASK_CMDID to support auto rate selection
+		 * across HT/VHT and for multiple VHT MCS support.
+		 */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+			/* If the peer is non-VHT, or no fixed VHT rate
+			 * is provided in the new bitrate mask, set the
+			 * rates using the peer_assoc command.
+			 */
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
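+	/* the (non-TDLS) peer of a STA vdev does not count against the
+	 * station limit
+	 */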
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+			ath11k_warn(ab, "failed to set STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to assoc state\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to disassociated state\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+	if (ret)
+		ath11k_warn(ar->ab, "failed to set tx power for station: %d\n",
+			    ret);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
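+		/* move the RX STBC stream count from its WMI bit position
+		 * to the IEEE HT capability position
+		 */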
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath11k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ /* TODO: SUBFEE not validated in HK, disable here until validated? */
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+	/* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+	/* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ *vht_cap |= sound_dim;
+ }
+
+	/* Use the STS advertised by FW only if SU Beamformee is supported */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+}
+
+static struct ieee80211_sta_vht_cap
+ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ /* TODO: Enable back VHT160 mode once association issues are fixed */
+ /* Disabling VHT160 and VHT80+80 modes */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
+{
+	/* TODO: Check the requested chainmask against the supported
+	 * chainmask table which is advertised in the extended_service_ready
+	 * event
+	 */
+
+ return 0;
+}
+
+static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
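+			/* swap the two 3-bit PPET sub-fields, which firmware
+			 * seemingly stores in the opposite order to the IEEE
+			 * PPE threshold encoding
+			 */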
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
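+	/* note: the CAP2 flags below are masked out of mac_cap_info[3];
+	 * this looks like a copy-paste of the mask above, and the bit
+	 * positions may not correspond to any CAP3 field
+	 */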
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_SRP_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static int ath11k_mac_copy_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
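+	/* build one iftype_data entry per supported interface type from the
+	 * band's firmware-reported HE capabilities
+	 */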
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath11k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath11k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath11k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath11k_mac_setup_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
+static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath11k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
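+	/* if the device is not started yet, only cache the masks; they are
+	 * applied to firmware when the pdev comes up
+	 */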
+ if (ar->state != ATH11K_STATE_ON &&
+ ar->state != ATH11K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = get_num_chains(tx_ant);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = get_num_chains(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+ struct sk_buff *msdu = skb;
+ struct ath11k *ar = skb_cb->ar;
+ struct ath11k_base *ab = ar->ab;
+
+ if (skb_cb->vif == vif) {
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ if (buf_id < 0)
+ return -ENOSPC;
+
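+	/* extend the frame for the MIC, which is presumably appended by
+	 * firmware for protected robust management frames
+	 */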
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH11K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
+ struct ieee80211_tx_info *info;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ info = IEEE80211_SKB_CB(skb);
+ arvif = ath11k_vif_to_arvif(info->control.vif);
+
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
+ }
+}
+
+static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count has reached a certain threshold, so as to prioritize
+ * other mgmt packets like auth and assoc to be sent on time
+ * for establishing successful connections.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
+ ath11k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ ath11k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool is_prb_rsp;
+ int ret;
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ ret = ath11k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath11k_mac_drain_tx(struct ath11k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath11k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
+{
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+
+ if (enable)
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+
+ return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+}
+
+static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_OFF:
+ ar->state = ATH11K_STATE_ON;
+ break;
+ case ATH11K_STATE_RESTARTING:
+ ar->state = ATH11K_STATE_RESTARTED;
+ break;
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_ON:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+ if (ret) {
+		ath11k_err(ab, "failed to enable PMF QOS: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+ if (ret) {
+		ath11k_err(ab, "failed to enable MESH MCAST ENABLE: %d\n", ret);
+ goto err;
+ }
+
+ __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath11k_reg_update_chan_list(ar);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath11k_mac_config_mon_status_default(ar, true);
+ if (ret) {
+ ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_config_mon_status_default(ar, false);
+ if (ret)
+ ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static void
+ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
+ struct vdev_create_params *params)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_pdev *pdev = ar->pdev;
+
+ params->if_id = arvif->vdev_id;
+ params->type = arvif->vdev_type;
+ params->subtype = arvif->vdev_subtype;
+ params->pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+}
+
+static u32
+ath11k_mac_prepare_he_mode(struct ath11k_pdev *pdev, u32 viftype)
+{
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ struct ath11k_band_cap *cap_band = NULL;
+ u32 *hecap_phy_ptr = NULL;
+ u32 hemode = 0;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ else
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
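+	/* compose the HE MU mode from the band's HE PHY caps: SU beamformee
+	 * is always enabled, SU beamformer and UL MU-MIMO follow the
+	 * firmware-reported capabilities
+	 */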
+ hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+ hemode = FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE) |
+ FIELD_PREP(HE_MODE_SU_TX_BFER, HECAP_PHY_SUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_UL_MUMIMO, HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr));
+
+ /* TODO WDS and other modes */
+ if (viftype == NL80211_IFTYPE_AP) {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFER,
+ HECAP_PHY_MUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+ } else {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+ }
+
+ return hemode;
+}
+
+static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ u32 param_id, param_value;
+ struct ath11k_base *ab = ar->ab;
+ int ret = 0;
+
+ param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ param_value = ath11k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+ arvif->vdev_id, ret, param_value);
+ return ret;
+ }
+ param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ param_value =
+ FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+ FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct vdev_create_params vdev_param = {0};
+ struct peer_create_params peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ /* Should we initialize any worker to handle connection loss indication
+ * from firmware in sta mode?
+ */
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
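+	/* spread the CAB queue and the per-AC queues over the first
+	 * ATH11K_HW_MAX_QUEUES - 1 hardware queues
+	 */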
+ vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
+
+ ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
+
+ ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+			ath11k_warn(ab, "failed to create peer for AP on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_set_kickout(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ar->num_peers--;
+ ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id);
+ }
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->num_created_vdevs--;
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = ar->ab;
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->num_created_vdevs--;
+
+ ath11k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_vif_txmgmt_idr_remove, vif);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
+ ath11k_mac_vif_unref, vif);
+ spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ }
+
+ /* Recalc txpower for remaining vdev */
+ ath11k_mac_txpower_recalc(ar);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+	/* TODO: recalculate traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath11k *ar = hw->priv;
+ bool reset_flag = false;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ /* For monitor mode */
+ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+ if (!ret) {
+ if (!reset_flag)
+ set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ else
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ } else {
+ ath11k_warn(ar->ab,
+			    "failed to set monitor filter: %d\n", ret);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath11k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath11k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel context, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: If there is one more channel context left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
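+/* Wait for the firmware to signal completion of a vdev start/stop request.
+ * The completion is triggered from the WMI event path; bail out early if a
+ * crash flush is in progress so callers don't block for the full timeout.
+ */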
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static int
+ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ int he_support = arvif->vif->bss_conf.he_support;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode =
+ ath11k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.channel.min_power = 0;
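+ /* mac80211 reports power limits in dBm; the WMI channel arguments
+ * appear to expect 0.5 dBm units, hence the doubling below.
+ */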
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.passive = arg.channel.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ /* TODO: Notify if secondary 80 MHz also needs radar detection */
+ if (he_support) {
+ ret = ath11k_set_he_mu_sounding_mode(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he mode vdev %i\n",
+ arg.vdev_id);
+ return ret;
+ }
+ }
+ }
+
+ arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath11k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+
+ /* Enable the CAC flag in the driver by checking the channel's DFS CAC
+ * time, i.e. the dfs_cac_ms value, which is valid only for radar
+ * channels, and the channel state NL80211_DFS_USABLE, which indicates
+ * that CAC needs to be done before the channel can be used. This flag
+ * is used to drop rx packets during CAC.
+ */
+ /* TODO Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.channel.freq, arg.vdev_id);
+ }
+
+ ret = ath11k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->vdev_stop_status.stop_in_progress = true;
+ ar->vdev_stop_status.vdev_id = arvif->vdev_id;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ spin_lock_bh(&ar->data_lock);
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+}
+
+static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+}
+
+struct ath11k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
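+/* The chanctx change handlers iterate the active interfaces twice: first to
+ * count the vifs bound to the changing context, then to fill the switch
+ * array allocated from that count.
+ */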
+static void
+ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
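+/* Take all affected vdevs down, restart them on the new channel definition
+ * and bring them back up with their existing bssid/aid.
+ */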
+static void
+ath11k_mac_update_vif_chan(struct ath11k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+}
+
+static void
+ath11k_mac_update_active_vif_chan(struct ath11k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ ath11k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto err;
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret)
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores a device-wide RTS/fragmentation threshold, but the
+ * firmware expects it per interface, so the ath11k driver applies it to
+ * every vdev.
+ */
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath11k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there is a WMI vdev param for the fragmentation
+ * threshold, no known firmware actually implements it. Moreover, it is
+ * not possible to leave frame fragmentation to mac80211 because the
+ * firmware clears the "more fragments" bit in the frame control field,
+ * making it impossible for remote devices to reassemble the frames.
+ *
+ * Hence implement a dummy callback just to say that fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+ long time_left;
+
+ if (drop)
+ return;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+}
+
+static int
+ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
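+/* A bitrate mask selects a single legacy rate only when exactly one legacy
+ * bit is set and no HT or VHT rates are requested at all.
+ */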
+static bool
+ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
+static bool
+ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy here. Basic rates are always present
+ * in bitrate mask
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
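+ /* The NSS mask must be contiguous from stream 1, i.e. of the form
+ * (1 << n) - 1, for a single NSS value to describe it.
+ */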
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (band == NL80211_BAND_5GHZ)
+ rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath11k_legacy_rates[rate_idx].bitrate;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
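+ /* Only the standard per-stream VHT MCS ranges 0-7 (0xff), 0-8 (0x1ff)
+ * and 0-9 (0x3ff), or no rates at all, can be programmed to firmware.
+ */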
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath11k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static int
+ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath11k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+
+ if (ath11k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone; rather it
+ * requires passing at least one of the used basic rates along with
+ * them. Fixed rate setting across different preambles (legacy, HT,
+ * VHT) is not supported by the FW, hence the FIXED_RATE vdev param is
+ * not suitable for setting single HT/VHT rates.
+ * But a single basic rate passed from userspace can be set through the
+ * FIXED_RATE param.
+ */
+ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min_t(u32, ar->num_tx_chains,
+ max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ /* If multiple rates across different preambles are given,
+ * we can reconfigure this info with all peers using the PEER_ASSOC
+ * command, with the exception cases below.
+ * - Single VHT rate: the peer_assoc command accommodates only MCS
+ * range values, i.e. 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, FW
+ * doesn't allow switching from VHT to legacy. Hence instead of
+ * setting legacy and VHT rates using the RATEMASK_CMD vdev cmd,
+ * we could set this VHT rate as the peer fixed rate param, which
+ * overrides the FIXED rate and the FW rate control algorithm.
+ * If a single VHT rate is passed along with HT rates, we select
+ * the VHT rate as the fixed rate for VHT peers.
+ * - Multiple VHT rates: when multiple VHT rates are given, this
+ * can be set using the RATEMASK CMD, which uses the FW rate
+ * control algorithm.
+ * TODO: Setting multiple VHT MCS values and replacing peer_assoc
+ * with RATEMASK_CMDID could cover all use cases of setting rates
+ * across multiple preambles and rates within the same type.
+ * But this requires more validation of the command at this point.
+ */
+
+ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath11k_warn(ar->ab,
+ "Setting more than one MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_RESTARTED) {
+ ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH11K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH11K_SCAN_IDLE) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath11k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH11K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* TODO: Use real NF instead of default one. */
+ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+}
+
+static const struct ieee80211_ops ath11k_ops = {
+ .tx = ath11k_mac_op_tx,
+ .start = ath11k_mac_op_start,
+ .stop = ath11k_mac_op_stop,
+ .reconfig_complete = ath11k_mac_op_reconfig_complete,
+ .add_interface = ath11k_mac_op_add_interface,
+ .remove_interface = ath11k_mac_op_remove_interface,
+ .config = ath11k_mac_op_config,
+ .bss_info_changed = ath11k_mac_op_bss_info_changed,
+ .configure_filter = ath11k_mac_op_configure_filter,
+ .hw_scan = ath11k_mac_op_hw_scan,
+ .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
+ .set_key = ath11k_mac_op_set_key,
+ .sta_state = ath11k_mac_op_sta_state,
+ .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath11k_mac_op_sta_rc_update,
+ .conf_tx = ath11k_mac_op_conf_tx,
+ .set_antenna = ath11k_mac_op_set_antenna,
+ .get_antenna = ath11k_mac_op_get_antenna,
+ .ampdu_action = ath11k_mac_op_ampdu_action,
+ .add_chanctx = ath11k_mac_op_add_chanctx,
+ .remove_chanctx = ath11k_mac_op_remove_chanctx,
+ .change_chanctx = ath11k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath11k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath11k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
+ .get_survey = ath11k_mac_op_get_survey,
+ .flush = ath11k_mac_op_flush,
+ .sta_statistics = ath11k_mac_op_sta_statistics,
+ CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+#ifdef CONFIG_ATH11K_DEBUGFS
+ .sta_add_debugfs = ath11k_sta_add_debugfs,
+#endif
+};
+
+static const struct ieee80211_iface_limit ath11k_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_combination ath11k_if_comb[] = {
+ {
+ .limits = ath11k_if_limits,
+ .n_limits = ARRAY_SIZE(ath11k_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 100,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ },
+};
+
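+/* Disable any channels outside the frequency range supported by the pdev, as
+ * reported in its HAL register capabilities.
+ */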
+static void ath11k_mac_update_ch_list(struct ath11k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ void *channels;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
+ ARRAY_SIZE(ath11k_5ghz_channels)) !=
+ ATH11K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath11k_2ghz_channels,
+ sizeof(ath11k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_g_rates_size;
+ band->bitrates = ath11k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_2ghz_chan,
+ reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ channels = kmemdup(ath11k_5ghz_channels,
+ sizeof(ath11k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+
+ return 0;
+}
+
+static const u8 ath11k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath11k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath11k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath11k_if_types_ext_capa,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath11k_mac_unregister(struct ath11k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath11k_mac_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath11k_mac_unregister(ar);
+ }
+}
+
+static int __ath11k_mac_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath11k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err_free;
+
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ if (ht_cap & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check if HT capability advertised from firmware is different
+ * for each band for a dual band capable radio. It will be tricky to
+ * handle it when the ht capability different for each band.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ ar->hw->vif_data_size = sizeof(struct ath11k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath11k_sta);
+
+ ar->hw->wiphy->iface_combinations = ath11k_if_comb;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath11k_iftypes_ext_capa);
+
+ ath11k_reg_init(ar);
+
+ /* advertise HW checksum offload capabilities */
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ /* Apply the regd received during initialization */
+ ret = ath11k_regd_update(ar, true);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ath11k_debug_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath11k_mac_register(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
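+ /* Derive a unique per-pdev address from the SoC base address */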
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ ret = __ath11k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+ }
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
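+ /* Each radio contributes TARGET_NUM_VDEVS ids; mark them all free */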
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ return 0;
+
+err_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath11k_mac_unregister(ar);
+ }
+
+ return ret;
+}
+
+int ath11k_mac_allocate(struct ath11k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+ if (!hw) {
+ ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach.
+ * Should we do this again?
+ */
+ ath11k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath11k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath11k_mac_destroy(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
new file mode 100644
index 000000000000..f286531cdd78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_MAC_H
+#define ATH11K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+struct ath11k;
+struct ath11k_base;
+
+struct ath11k_generic_iter {
+ struct ath11k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware implementation
+ * won't start; we already have the same functionality in hostapd. Unit is
+ * seconds.
+ */
+#define ATH11K_KEEPALIVE_MIN_IDLE 3747
+#define ATH11K_KEEPALIVE_MAX_IDLE 3895
+#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+#define ATH11K_CHAN_WIDTH_NUM 8
+
+extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+
+void ath11k_mac_destroy(struct ath11k_base *ab);
+void ath11k_mac_unregister(struct ath11k_base *ab);
+int ath11k_mac_register(struct ath11k_base *ab);
+int ath11k_mac_allocate(struct ath11k_base *ab);
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath11k_mac_scan_finish(struct ath11k *ar);
+void ath11k_mac_scan_finish(struct ath11k *ar);
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id);
+
+void ath11k_mac_drain_tx(struct ath11k *ar);
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
new file mode 100644
index 000000000000..4bf1dfa498b6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
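+ /* Allocate atomically: this runs in the HTT event path with
+ * base_lock held.
+ */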
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
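+/* Block until the firmware reports the peer as mapped or unmapped via the
+ * HTT peer map/unmap events, or until the three second timeout expires.
+ */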
+static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath11k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param)
+{
+ struct ath11k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ param->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
+ param->peer_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+ ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ return -ENOENT;
+ }
+
+ peer->sta = sta;
+ arvif->ast_hash = peer->ast_hash;
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
new file mode 100644
index 000000000000..9a40d1f6ccd9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_PEER_H
+#define ATH11K_PEER_H
+
+struct ath11k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param);
+
+#endif /* ATH11K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
new file mode 100644
index 000000000000..2377895a58ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -0,0 +1,2433 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include <linux/of.h>
+#include <linux/firmware.h>
+
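+/* QMI wire-format descriptors: each entry maps one TLV of the firmware's QMI
+ * message onto a field of the corresponding request/response structure. The
+ * *_valid flags indicate whether the optional TLV that follows is present.
+ */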
+static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
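+/*
+ * The qmi_elem_info tables in this file describe how each WLFW message
+ * maps onto the QMI TLV wire format: data_type selects the primitive
+ * encoder, tlv_type is the on-wire tag and offset locates the member
+ * in the C struct. A QMI_OPT_FLAG entry reads the *_valid byte and
+ * gates whether the optional TLV sharing its tlv_type is emitted at
+ * all; a QMI_DATA_LEN entry carries the element count for the
+ * VAR_LEN_ARRAY that follows it. QMI_EOTI terminates each table.
+ */
+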
+static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
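+/*
+ * Send the host capability request. This runs in the server-arrive
+ * handshake after indication registration and advertises what the host
+ * supports: BDF download, the target memory configuration mode and the
+ * current calibration state. The response carries only a generic QMI
+ * result code.
+ */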
+static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
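+/*
+ * Register with the firmware for the indications this driver consumes:
+ * fw ready, request mem, fw mem ready, cal done and fw init done.
+ * client_id identifies this host to the WLFW service.
+ */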
+static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to register fw indication %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
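+/*
+ * Answer the firmware's REQUEST_MEM indication: report back the host
+ * physical address, size and type recorded in ab->qmi.target_mem for
+ * every memory segment the firmware asked for.
+ */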
+static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0, i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < req->mem_seg_len; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ }
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
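+/*
+ * Resolve the firmware's memory segment requests into host addresses.
+ * BDF segments are pointed at the fixed ATH11K_QMI_BDF_ADDRESS window,
+ * CALDB segments are acknowledged with a zero address because cold
+ * boot calibration is not supported yet, and any other type is
+ * dropped.
+ */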
+static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i, idx;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case BDF_MEM_REGION_TYPE:
+ ab->qmi.target_mem[idx].paddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].vaddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
+ ath11k_warn(ab, "qmi mem size is low to load caldata\n");
+ return -EINVAL;
+ }
+ /* TODO ath11k does not support cold boot calibration */
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].vaddr = 0;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+}
+
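+/*
+ * Query target capabilities: chip and board identifiers, SoC id and
+ * firmware version information. The board id selects which board data
+ * file is downloaded later; 0xFF is used when the firmware does not
+ * report one.
+ */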
+static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed target cap request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = 0xFF;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strlcpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ ab->qmi.target.fw_build_id);
+
+out:
+ return ret;
+}
+
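+/*
+ * Copy one download image into the shared BDF memory window: the board
+ * data file at the start of the window, calibration data at
+ * ATH11K_QMI_CALDATA_OFFSET. Both are clamped to the board_size
+ * advertised in hw_params.
+ */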
+static int
+ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
+ void __iomem *bdf_addr)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath11k_board_data bd;
+ u32 fw_size;
+ int ret = 0;
+
+ memset(&bd, 0, sizeof(bd));
+
+ switch (type) {
+ case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load BDF\n");
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
+ memcpy_toio(bdf_addr, bd.data, fw_size);
+ ath11k_core_free_bdf(ab, &bd);
+ break;
+ case ATH11K_QMI_FILE_TYPE_CALDATA:
+ snprintf(filename, sizeof(filename),
+ "%s/%s", ab->hw_params.fw.dir, ATH11K_QMI_DEFAULT_CAL_FILE_NAME);
+ ret = request_firmware(&fw_entry, filename, dev);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load CAL: %s\n", filename);
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size,
+ fw_entry->size);
+
+ memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
+ fw_entry->data, fw_size);
+ ath11k_info(ab, "qmi downloading BDF: %s, size: %zu\n",
+ filename, fw_entry->size);
+
+ release_firmware(fw_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req->total_size = fw_size;
+
+out:
+ return ret;
+}
+
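+/*
+ * Drive the BDF download handshake for each file type. The file
+ * contents are placed directly into the memory window by
+ * ath11k_qmi_prepare_bdf_download(), so each request only describes
+ * the transfer (file id, total size, segment id) and carries no inline
+ * data (data_valid stays 0).
+ */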
+static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ void __iomem *bdf_addr = NULL;
+ int type, ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ bdf_addr = ioremap(ATH11K_QMI_BDF_ADDRESS, ATH11K_QMI_BDF_MAX_SIZE);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for BDF\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->seg_id_valid = 1;
+ req->seg_id = type;
+ req->data_valid = 0;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ req->bdf_type = 0;
+ req->bdf_type_valid = 0;
+ req->end_valid = 1;
+ req->end = 1;
+
+ ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out_qmi_bdf;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out_qmi_bdf;
+ }
+ }
+ ath11k_info(ab, "qmi BDF downloaded\n");
+
+out_qmi_bdf:
+ iounmap(bdf_addr);
+out:
+ kfree(req);
+ return ret;
+}
+
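+/*
+ * Send the M3 firmware information request. This driver does not
+ * download a separate M3 image, so the address and size are reported
+ * as zero.
+ */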
+static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.addr = 0;
+ req.size = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
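+/*
+ * Switch the firmware WLAN mode. A -ENETRESET from qmi_txn_wait()
+ * while switching to ATH11K_FIRMWARE_MODE_OFF means the WLFW service
+ * has already gone away, which is treated as success during shutdown.
+ */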
+static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath11k_warn(ab, "WLFW service is dis-connected\n");
+ return 0;
+ }
+ ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
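+/*
+ * Push the WLAN configuration to the firmware: the host version
+ * string, the target copy engine pipe setup and the service-to-pipe
+ * map taken from ab->qmi.ce_cfg. Shadow register configuration is not
+ * used here.
+ */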
+static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn = {};
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+ req->shadow_reg_valid = 0;
+ req->shadow_reg_v2_valid = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan mode off\n");
+ return;
+ }
+}
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
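+/*
+ * Queue a driver event from a QMI callback. Events are processed
+ * asynchronously in ab->qmi.event_wq by ath11k_qmi_driver_event_work();
+ * GFP_ATOMIC keeps this post path safe from non-sleeping contexts.
+ */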
+static int
+ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
+ enum ath11k_qmi_event_type type,
+ void *data)
+{
+ struct ath11k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_load_bdf(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
+ ath11k_warn(ab, "invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+ return;
+ }
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to alloc target memory:%d\n", ret);
+ return;
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware memory ready indication\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
+}
+
+static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+}
+
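+/* Map each WLFW indication message id onto its handler above. */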
+static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(qmi_wlanfw_mem_ready_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_fw_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+ .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
+ },
+};
+
+static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return 0;
+}
+
+static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw del server\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath11k_qmi_ops = {
+ .new_server = ath11k_qmi_ops_new_server,
+ .del_server = ath11k_qmi_ops_del_server,
+};
+
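+/*
+ * Drain the event list. The lock is dropped around each handler
+ * because the handlers issue blocking QMI transactions of their own;
+ * FW_MEM_READY kicks off the target cap query, BDF download and M3
+ * info exchange via ath11k_qmi_event_load_bdf().
+ */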
+static void ath11k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
+ event_work);
+ struct ath11k_qmi_driver_event *event;
+ struct ath11k_base *ab = qmi->ab;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath11k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
+ return;
+ }
+
+ switch (event->type) {
+ case ATH11K_QMI_EVENT_SERVER_ARRIVE:
+ ath11k_qmi_event_server_arrive(qmi);
+ break;
+ case ATH11K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_REQUEST_MEM:
+ ath11k_qmi_event_mem_request(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_MEM_READY:
+ ath11k_qmi_event_load_bdf(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ ath11k_core_qmi_firmware_ready(ab);
+ ab->qmi.cal_done = 1;
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+ break;
+ default:
+ ath11k_warn(ab, "invalid event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath11k_qmi_init_service(struct ath11k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(ab->qmi.target));
+ memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
+ &ath11k_qmi_ops, ath11k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to initialize qmi handle\n");
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!ab->qmi.event_wq) {
+ ath11k_err(ab, "failed to allocate workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
+ ATH11K_QMI_WLFW_SERVICE_VERS_V01,
+ ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to add qmi lookup\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath11k_qmi_deinit_service(struct ath11k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+}
+
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
new file mode 100644
index 000000000000..3f7db642d869
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_QMI_H
+#define ATH11K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH11K_HOST_VERSION_STRING "WIN"
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
+#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
+#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
+#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH11K_QMI_RESP_LEN_MAX 8192
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
+#define ATH11K_QMI_CALDB_SIZE 0x480000
+#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
+#define QMI_WLFW_FW_READY_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH11K_FIRMWARE_MODE_OFF 4
+#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
+
+struct ath11k_base;
+
+enum ath11k_qmi_file_type {
+ ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
+ ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath11k_qmi_event_type {
+ ATH11K_QMI_EVENT_SERVER_ARRIVE,
+ ATH11K_QMI_EVENT_SERVER_EXIT,
+ ATH11K_QMI_EVENT_REQUEST_MEM,
+ ATH11K_QMI_EVENT_FW_MEM_READY,
+ ATH11K_QMI_EVENT_FW_READY,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
+ ATH11K_QMI_EVENT_REGISTER_DRIVER,
+ ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH11K_QMI_EVENT_RECOVERY,
+ ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH11K_QMI_EVENT_POWER_UP,
+ ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_MAX,
+};
+
+struct ath11k_qmi_driver_event {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+ void *data;
+};
+
+struct ath11k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u8 *shadow_reg_v2;
+ int shadow_reg_v2_len;
+};
+
+struct ath11k_qmi_event_msg {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ dma_addr_t paddr;
+ u32 vaddr;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+
+struct ath11k_qmi {
+ struct ath11k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath11k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ u8 cal_done;
+ struct target_info target;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_IPQ8074_FW_MEM_MODE 0xFF
+#define HOST_DDR_REGION_TYPE 0x1
+#define BDF_MEM_REGION_TYPE 0x2
+#define CALDB_MEM_REGION_TYPE 0x4
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
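Every optional TLV in these request structures is guarded by a *_valid byte: only members whose flag is set are encoded by the QMI core. A minimal, hypothetical sketch of populating a host-cap request — the field choices are illustrative, not necessarily the driver's exact sequence, and 'ab' is assumed to be the driver's ath11k_base:

	struct qmi_wlanfw_host_cap_req_msg_v01 req = {};

	req.num_clients_valid = 1;	/* mark this optional TLV as present */
	req.num_clients = 1;
	req.mem_cfg_mode_valid = 1;
	req.mem_cfg_mode = ab->qmi.target_mem_mode;
	/* members left with *_valid == 0 are simply not encoded on the wire */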
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with the MCL and FW teams whether 'data' can be a
+ * pointer and can be the last element in the structure.
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
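Since a single download message carries at most QMI_WLANFW_MAX_DATA_SIZE_V01 (6144) bytes, board files larger than that must be sent as a sequence of segments. A sketch of the chunking arithmetic, assuming 'fw' is the loaded board-file blob (e.g. a struct firmware) and 'req' a zeroed request; the actual download helper is defined elsewhere in this patch:

	const u8 *ptr = fw->data;
	u32 remaining = fw->size;
	u32 seg_id = 0;

	while (remaining) {
		req->seg_id_valid = 1;
		req->seg_id = seg_id++;
		req->data_valid = 1;
		req->data_len = min_t(u32, remaining, QMI_WLANFW_MAX_DATA_SIZE_V01);
		memcpy(req->data, ptr, req->data_len);
		req->end_valid = 1;
		req->end = (remaining == req->data_len);	/* last segment */

		/* ... encode and send req, wait for the response ... */

		ptr += req->data_len;
		remaining -= req->data_len;
	}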
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode);
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
+void ath11k_qmi_event_work(struct work_struct *work);
+void ath11k_qmi_msg_recv_work(struct work_struct *work);
+void ath11k_qmi_deinit_service(struct ath11k_base *ab);
+int ath11k_qmi_init_service(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
new file mode 100644
index 000000000000..453aa9c06969
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath11k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH11K_2GHZ_CH01_11,
+ ATH11K_5GHZ_5150_5350,
+ ATH11K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wmi_init_country_params init_country_param;
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+	/* Currently supporting only General User Hints. Cell-based user
+	 * hints are to be handled later.
+	 * Hints from other sources like Core or Beacons are not expected for
+	 * self-managed wiphys.
+	 */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath11k_regdom_changes(ar, request->alpha2)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+	/* Send the country code to the firmware and wait for
+	 * the WMI_REG_CHAN_LIST_CC event to update the
+	 * reg info.
+	 */
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+}
+
+int ath11k_reg_update_chan_list(struct ath11k *ar)
+{
+ struct ieee80211_supported_band **bands;
+ struct scan_chan_list_params *params;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct channel_param *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int params_len;
+ int i, ret;
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ params_len = sizeof(struct scan_chan_list_params) +
+ num_channels * sizeof(struct channel_param);
+ params = kzalloc(params_len, GFP_KERNEL);
+
+ if (!params)
+ return -ENOMEM;
+
+ params->pdev_id = ar->pdev->pdev_id;
+ params->nallchans = num_channels;
+
+ ch = params->ch_param;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, params->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+			/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ kfree(params);
+
+ return ret;
+}
+
+static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath11k_regd_update(struct ath11k *ar, bool init)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath11k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath11k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
+ }
+ } else {
+ regd = ab->new_regd[pdev_id];
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+ spin_unlock(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rtnl_lock();
+ ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
+ rtnl_unlock();
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH11K_STATE_ON) {
+ ret = ath11k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH11K_DFS_REG_FCC:
+ case ATH11K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH11K_DFS_REG_ETSI:
+ case ATH11K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH11K_DFS_REG_MKK:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static bool
+ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
+
+static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+	/* To be safe, let's use the max cac timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
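As a worked example of the intersection: combining a default rule of 5150-5350 MHz @ 80 MHz, 23 dBm with a current rule of 5250-5330 MHz @ 40 MHz, 30 dBm yields max(5150, 5250)-min(5350, 5330) = 5250-5330 MHz; the span is 80 MHz, min(80, 40) caps max_bandwidth at 40 MHz, max EIRP becomes min(23, 30) = 23 dBm, and the flag sets of both rules are OR'ed together.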
+
+static struct ieee80211_regdomain *
+ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+ /* We set the new country and dfs region directly and only trim
+ * the freq, power, antenna gain by intersecting with the
+ * default regdomain. Also MAX of the dfs cac timeout is selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ ath11k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw < 40)
+ bw = 20;
+
+ return bw;
+}
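For example, a slice covering 5590-5650 MHz with max_bw 80 has a raw width of 60 MHz; min(60, 80) = 60 falls into the 40 <= bw < 80 bucket, so the rule is advertised at 40 MHz. The rounding is deliberately downward, so a rule never advertises a channel width wider than its frequency span.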
+
+static void
+ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct cur_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+
+ ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
+ end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
+ else
+ end_freq = reg_rule->end_freq;
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (end_freq == reg_rule->end_freq) {
+ regd->n_reg_rules--;
+ *rule_idx = i;
+ return;
+ }
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ *rule_idx = i;
+}
+
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct cur_reg_rule *reg_rule;
+ u8 i = 0, j = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
+
+ if (!num_rules)
+ goto ret;
+
+	/* Add the maximum (two) additional rules needed to accommodate
+	 * the ETSI weather radar band split
+	 */
+ if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ tmp_regd->n_reg_rules = num_rules;
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+	/* Update reg_rules[] below. Firmware is expected to
+	 * send these rules in order (2G rules first and then 5G).
+	 */
+ for (; i < tmp_regd->n_reg_rules; i++) {
+ if (reg_info->num_2g_reg_rules &&
+ (i < reg_info->num_2g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2g_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2g);
+ flags = 0;
+ } else if (reg_info->num_5g_reg_rules &&
+ (j < reg_info->num_5g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5g_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5g);
+
+ /* FW doesn't pass NL80211_RRF_AUTO_BW flag for
+ * BW Auto correction, we can enable this by default
+ * for all 5G rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per other BW rule flags we pass from here
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
+
+ ath11k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+ reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
+ ath11k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath11k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath11k_regd_update_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ regd_update_work);
+ int ret;
+
+ ret = ath11k_regd_update(ar, false);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, Host driver
+ * and userspace. Hence as a fallback mechanism we can set
+ * the prev or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath11k_reg_init(struct ath11k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+}
+
+void ath11k_reg_free(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < MAX_RADIOS; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
new file mode 100644
index 000000000000..39b7fc943541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_REG_H
+#define ATH11K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath11k_base;
+struct ath11k;
+
+/* DFS regdomains supported by Firmware */
+enum ath11k_dfs_region {
+ ATH11K_DFS_REG_UNSET,
+ ATH11K_DFS_REG_FCC,
+ ATH11K_DFS_REG_ETSI,
+ ATH11K_DFS_REG_MKK,
+ ATH11K_DFS_REG_CN,
+ ATH11K_DFS_REG_KR,
+ ATH11K_DFS_REG_UNDEF,
+};
+
+/* ATH11K Regulatory APIs */
+void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_free(struct ath11k_base *ab);
+void ath11k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect);
+int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_reg_update_chan_list(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
new file mode 100644
index 000000000000..a5aff801f17f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -0,0 +1,1212 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_RX_DESC_H
+#define ATH11K_RX_DESC_H
+
+enum rx_desc_rxpcu_filter {
+ RX_DESC_RXPCU_FILTER_PASS,
+ RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+ RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ * This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ * This MPDU did not pass the regular frame filter and would
+ * have been dropped, were it not for the frame fitting into the
+ * 'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ * This MPDU did not pass the regular frame filter and also did
+ * not pass the rxpcu_monitor_client filter. It would have been
+ * dropped, except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+enum rx_desc_sw_frame_grp_id {
+ RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+ RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+ RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+ RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA BIT(10)
+#define RX_ATTENTION_INFO1_EOSP BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT BIT(13)
+#define RX_ATTENTION_INFO1_ORDER BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE BIT(31)
+
+struct rx_attention {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is a not an
+ * A-MPDU frame but a stand alone MPDU. Interior MPDU in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if number of MSDUs in A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remaining MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. This MPDU and the
+ * remaining packets in the PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit of the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and will not
+ * be decapsulated but will be DMA'ed in RAW format as a single
+ * MSDU buffer.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Only valid if da_is_valid is set. Indicates that the DA address
+ * was a Multicast or Broadcast address.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values are
+ * defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
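These info words are meant to be read with the bitfield helpers matching the GENMASK()/BIT() definitions above. A couple of hypothetical accessors as a sketch — the names are illustrative, not the driver's actual helpers, which live in its rx-path code — assuming <linux/bitfield.h>:

	#include <linux/bitfield.h>

	static inline bool ath11k_rx_attn_msdu_done(const struct rx_attention *attn)
	{
		return !!(le32_to_cpu(attn->info2) & RX_ATTENTION_INFO2_MSDU_DONE);
	}

	static inline u32 ath11k_rx_attn_decrypt_status(const struct rx_attention *attn)
	{
		return FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
				 le32_to_cpu(attn->info2));
	}

Per the msdu_done description above, that bit must be checked first: none of the other descriptor fields are guaranteed valid until it is set.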
+
+#define RX_MPDU_START_INFO0_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO1_MPDU_CTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+
+#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+
+#define RX_MPDU_START_RAW_MPDU BIT(0)
+
+struct rx_mpdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+ __le32 peer_meta_data;
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 raw;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ * Note: for ndp frame, if it was expected because the preceding
+ * NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ * used. This setting will also be used for every ndp frame in
+ * case Promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ * Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ * PHY error was received before MAC received the complete MPDU
+ * header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ * RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ * AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated in
+ * RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ * This field indicates a unique peer identifier. It is set equal
+ * to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ * Indicates that each of these fields has a valid entry.
+ *
+ * mac_addr_adx_valid
+ * Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ * Valid only when mpdu_frame_control_valid is set. Indicate
+ * whether the frame was received from the DS or sent to the DS.
+ *
+ * encrypted
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * The sequence number from the 802.11 header.
+ *
+ * epd_en
+ * If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ * If set, all frames (data only?) shall be encrypted. If not,
+ * RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ * Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ * BSSID of the incoming frame matched one of the 8 BSSID
+ * register values.
+ *
+ * bssid_number
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * pn
+ * The PN number.
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had an FCS failure.
+ *
+ * key_id
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ * doesn't follow, so the RX DECRYPTION module either uses the
+ * old peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets bit 'ast_index_not_found or ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ * reason only needs to evaluate this bit and none of the other ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * a PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ * See definition in RX attention descriptor
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_START_INFO1_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4 BIT(10)
+#define RX_MSDU_START_INFO2_IPV6 BIT(11)
+#define RX_MSDU_START_INFO2_TCP BIT(12)
+#define RX_MSDU_START_INFO2_UDP BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_START_INFO2_LDPC BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT GENMASK(9, 8)
+
+#define RX_MSDU_START_INFO3_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC BIT(12)
+#define RX_MSDU_START_INFO3_SGI GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
+
+struct rx_msdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
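As with rx_attention, the masks above are consumed via FIELD_GET() after byte-swapping. A hypothetical example extracting the decapsulated MSDU length and the received bandwidth — illustrative names, not the driver's actual helpers:

	static inline u16 ath11k_rx_msdu_start_len(const struct rx_msdu_start *msdu)
	{
		return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
				 le32_to_cpu(msdu->info1));
	}

	static inline u8 ath11k_rx_msdu_start_bw(const struct rx_msdu_start *msdu)
	{
		/* value maps onto enum rx_msdu_start_recv_bw */
		return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
				 le32_to_cpu(msdu->info3));
	}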
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ *	Depending upon mode bit, this field either indicates the
+ *	L4 offset in bytes from the start of RX_HEADER (only valid
+ *	if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ *	the offset in bytes to the start of TCP or UDP header from
+ *	the start of the IP header after decapsulation (only valid if
+ *	tcp_proto or udp_proto is set). The value 0 indicates that
+ *	the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ *	Indicates the MSDU number within an MPDU. This value is
+ *	reset to zero at the start of each MPDU. If the number of
+ *	MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ *	Indicates that either the IP More Fragments bit is set or the
+ *	IP fragment number is non-zero. If set, this is a fragmented
+ *	IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ *	IPv4/IPv6 header, including IPv4 options and
+ * recognizable extension headers parsed fully within first 256
+ * bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *	When set, indicates the frame was received with LDPC coding.
+ * ip4_protocol_ip6_next_header
+ *	For IPv4, this is the 8 bit protocol field. For IPv6 this
+ *	is the 8 bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by RxOLE register - If register bit set to 0,
+ * Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ * addresses; otherwise, toeplitz hash is computed over 4-tuple
+ * IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ *	Field only valid when reception_type is
+ *	RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ *	RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ */
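+
+/* Usage sketch (illustrative only, not part of the descriptor layout):
+ * the info words are little-endian bitfields, so a consumer is expected
+ * to decode them with le32_to_cpu() plus FIELD_GET() from
+ * <linux/bitfield.h>, for example:
+ *
+ *	u32 info3 = le32_to_cpu(msdu_start->info3);
+ *	u8 pkt_type = FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, info3);
+ *	u8 sgi = FIELD_GET(RX_MSDU_START_INFO3_SGI, info3);
+ *	u8 bw = FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, info3);
+ *
+ * pkt_type, sgi and bw then index the enums defined above.
+ */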
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
+
+struct rx_msdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 tcp_udp_cksum;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info4;
+ __le32 rule_indication[2];
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ *	Indicates why RXPCU allowed this MPDU frame into the receive
+ *	path. Values are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * ext_wapi_pn*
+ * Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * first_msdu
+ *	Indicates the first MSDU of an A-MSDU. If both first_msdu and
+ *	last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ *	frame: normal MPDU. Interior MSDUs in an A-MSDU shall have both
+ *	first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ *	A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *	Field only valid if da_is_valid is set. Indicates the DA address
+ *	was a multicast or broadcast address.
+ *
+ * l3_header_padding
+ *	Number of bytes padded to make sure that the L3 header always
+ *	starts on a dword boundary.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ *	The 16-bit type value indicating the type of the L3 layer
+ *	extracted from LLC/SNAP, set to zero if SNAP is not
+ *	available.
+ *
+ * rule_indication
+ *	Bitmap indicating which of the rules have matched.
+ *
+ * sa_idx
+ * The offset in the address table which matches MAC source address
+ *
+ * da_idx
+ * The offset in the address table which matches MAC destination
+ * address.
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ *	The id of the REO exit ring to which the MSDU frame shall be
+ *	pushed after (MPDU level) reordering has finished. Values are
+ *	defined in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ */
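+
+/* Usage sketch (illustrative only): a non-aggregated frame has both
+ * first_msdu and last_msdu set in info2, e.g.:
+ *
+ *	u32 info2 = le32_to_cpu(msdu_end->info2);
+ *	bool standalone = (info2 & RX_MSDU_END_INFO2_FIRST_MSDU) &&
+ *			  (info2 & RX_MSDU_END_INFO2_LAST_MSDU);
+ */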
+
+enum rx_mpdu_end_rxdma_dest_ring {
+ RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+ RX_MPDU_END_RXDMA_DEST_RING_FW,
+ RX_MPDU_END_RXDMA_DEST_RING_SW,
+ RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD BIT(28)
+
+struct rx_mpdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ * This bit will be '1' when WEP or TKIP or WAPI key type is
+ * received for 11ah short frame. Crypto will bypass the received
+ * packet without decryption to RxOLE after setting this bit.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is
+ * busy with TX packet processing.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ * Set by Rx crypto when crypto detected a TKIP MIC error for
+ * this MPDU.
+ *
+ * decrypt_err
+ * Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ * MPDU or CRYPTO received an encrypted frame, but did not get a
+ * valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ * Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ * in the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ * Set by RX CRYPTO to indicate that there is a valid PN field
+ * present in this MPDU.
+ *
+ * fcs_err
+ * Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ * Set by RXOLE when there is an msdu length error detected
+ * in at least 1 of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ * The ring to which RXDMA0/1 shall push the frame, assuming
+ * no MPDU level errors are detected. In case of MPDU level
+ * errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ *	are defined in enum %RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ */
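+
+/* Usage sketch (illustrative only): the MPDU level error indications are
+ * single bits in info1; a receive path would typically drop the frame
+ * when any of them is set, e.g.:
+ *
+ *	u32 info1 = le32_to_cpu(mpdu_end->info1);
+ *	bool bad = info1 & (RX_MPDU_END_INFO1_FCS_ERR |
+ *			    RX_MPDU_END_INFO1_DECRYPT_ERR |
+ *			    RX_MPDU_END_INFO1_TKIP_MIC_ERR);
+ */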
+
+/* Padding bytes to avoid TLV's spanning across 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES 4
+#define HAL_RX_DESC_PADDING1_BYTES 16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN 120
+
+struct hal_rx_desc {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+	u8 msdu_payload[];
+} __packed;
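+
+/* Layout note (illustrative): the MSDU payload immediately follows this
+ * descriptor in the buffer, so given a hypothetical buffer pointer:
+ *
+ *	struct hal_rx_desc *desc = (struct hal_rx_desc *)buf;
+ *	u8 *payload = desc->msdu_payload;
+ *
+ * i.e. the payload starts at buf + sizeof(*desc).
+ */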
+
+#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
new file mode 100644
index 000000000000..d2dc9db01491
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "testmode_i.h"
+
+static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
+ [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH11K_TM_DATA_MAX_LEN },
+ [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ bool consumed;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ consumed = true;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ar->ab,
+ "failed to allocate skb for testmode wmi event\n");
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi even cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH11K_TESTMODE_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct sk_buff *skb;
+ u32 cmd_id, buf_len;
+ int ret;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath11k *ar = hw->priv;
+ struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH11K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
+ case ATH11K_TM_CMD_GET_VERSION:
+ return ath11k_tm_cmd_get_version(ar, tb);
+ case ATH11K_TM_CMD_WMI:
+ return ath11k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h
new file mode 100644
index 000000000000..aaa122ed9069
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/ath11k/testmode_i.h
new file mode 100644
index 000000000000..4bae2a9eeea4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode_i.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+/* "API" level of the ath11k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH11K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH11K_TESTMODE_VERSION_MINOR 0
+
+#define ATH11K_TM_DATA_MAX_LEN 5000
+
+enum ath11k_tm_attr {
+ __ATH11K_TM_ATTR_INVALID = 0,
+ ATH11K_TM_ATTR_CMD = 1,
+ ATH11K_TM_ATTR_DATA = 2,
+ ATH11K_TM_ATTR_WMI_CMDID = 3,
+ ATH11K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH11K_TM_ATTR_VERSION_MINOR = 5,
+ ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH11K_TM_ATTR_AFTER_LAST,
+ ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath11k testmode interface commands specified in
+ * ATH11K_TM_ATTR_CMD
+ */
+enum ath11k_tm_cmd {
+	/* Returns the supported ath11k testmode interface version in
+	 * ATH11K_TM_ATTR_VERSION_MAJOR and ATH11K_TM_ATTR_VERSION_MINOR.
+	 * Always guaranteed to work. User space uses this to verify
+	 * it's using the correct version of the testmode interface.
+	 */
+ ATH11K_TM_CMD_GET_VERSION = 0,
+
+ /* The command used to transmit a WMI command to the firmware and
+ * the event to receive WMI events from the firmware. Without
+ * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+ * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
+ * ATH11K_TM_ATTR_DATA.
+ */
+ ATH11K_TM_CMD_WMI = 1,
+};
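+
+/* Example (illustrative only, assuming the usual nl80211 testmode flow
+ * with libnl-genl; socket setup and the resolved nl80211_id are omitted):
+ * a userspace tool could query the interface version roughly as follows:
+ *
+ *	struct nl_msg *msg = nlmsg_alloc();
+ *	struct nlattr *td;
+ *
+ *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
+ *		    NL80211_CMD_TESTMODE, 0);
+ *	nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_idx);
+ *	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
+ *	nla_put_u32(msg, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_GET_VERSION);
+ *	nla_nest_end(msg, td);
+ *	nl_send_auto(sock, msg);
+ *
+ * The reply carries ATH11K_TM_ATTR_VERSION_MAJOR/_MINOR nested in
+ * NL80211_ATTR_TESTDATA.
+ */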
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
new file mode 100644
index 000000000000..f0cc49ba0387
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
new file mode 100644
index 000000000000..8700a622be7b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH11K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH11K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath11k
+
+TRACE_EVENT(ath11k_htt_pktlog,
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_ppdu_stats,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_rxdesc,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
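+
+/* Usage sketch (illustrative): callers invoke the generated trace_<name>()
+ * functions directly; they compile away to empty stubs when
+ * CONFIG_ATH11K_TRACING is disabled, e.g.:
+ *
+ *	trace_ath11k_htt_rxdesc(ar, skb->data, skb->len);
+ */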
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
new file mode 100644
index 000000000000..a9b301ceb24b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -0,0 +1,5810 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+ struct ath11k_service_ext_param param;
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+ struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+};
+
+struct wmi_tlv_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+ = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+ [WMI_TAG_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT]
+ = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT]
+ = {.min_len = sizeof(struct wmi_ready_event) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+ = {.min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_STATS_EVENT]
+ = { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+};
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath11k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
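+
+/* Note (illustrative): PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
+ * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI, i.e. the table maps
+ * each hw mode to its priority constant defined elsewhere in wmi.h.
+ */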
+
+static int
+ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
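+
+/* Wire format walked above (for reference): each TLV starts with a 32-bit
+ * header carrying WMI_TLV_TAG and WMI_TLV_LEN, followed by tlv_len bytes
+ * of payload; TLVs are packed back to back until the buffer is exhausted.
+ */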
+
+static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
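+
+/* Usage sketch (illustrative): event handlers typically parse an event
+ * into a tag-indexed table and look up the TLVs they need, e.g.:
+ *
+ *	const void **tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data,
+ *						     skb->len, GFP_ATOMIC);
+ *	if (IS_ERR(tb))
+ *		return PTR_ERR(tb);
+ *	ev = tb[WMI_TAG_READY_EVENT];
+ *	...
+ *	kfree(tb);
+ */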
+
+static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = cmd;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+
+ if (ret == -EAGAIN)
+ ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ return ret;
+}
+
+static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ const void *ptr,
+ struct ath11k_service_ext_param *param)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
+ param->default_fw_config_bits = ev->default_fw_config_bits;
+ param->he_cap_info = ev->he_cap_info;
+ param->mpdu_density = ev->mpdu_density;
+ param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
+ memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
+
+ return 0;
+}
+
+static int
+ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+ struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+ struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+ struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath11k_pdev *pdev)
+{
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_band_cap *cap_band;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+ if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+ break;
+
+ phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+ while (phy_map) {
+ phy_map >>= 1;
+ phy_idx++;
+ }
+ }
+
+ if (hw_idx == hw_caps->num_hw_modes)
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_id >= hal_reg_caps->num_phy)
+ return -EINVAL;
+
+ mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = mac_phy_caps->pdev_id;
+ pdev_cap->supported_bands = mac_phy_caps->supported_bands;
+ pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+
+ /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
+ * band to band for a single radio, need to see how this should be
+ * handled.
+ */
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+ } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ } else {
+ return -EINVAL;
+ }
+
+	/* tx/rx chainmask reported from fw depends on the actual hw chains used.
+	 * For example, for 4x4 capable macphys, the first 4 chains can be used
+	 * for the first mac and the remaining 4 chains for the second mac, or
+	 * vice-versa. In this case, tx/rx chainmask 0xf will be advertised for
+	 * the first mac and 0xf0 for the second mac, or vice-versa. Compute the
+	 * shift value for the tx/rx chainmask which will be used to advertise
+	 * supported ht/vht rates to mac80211.
+	 */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
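+
+	/* Worked example (illustrative): a second 4x4 capable mac that
+	 * advertises chainmask 0xf0 gets a chain mask shift of 4
+	 * (find_first_bit(0xf0) == 4), so the rates reported to mac80211
+	 * are computed as if its chains were numbered from 0.
+	 */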
+
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ return 0;
+}
+
+static int
+ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_hal_reg_capabilities *reg_caps,
+ struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+ u8 phy_idx,
+ struct ath11k_hal_reg_capabilities_ext *param)
+{
+ struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
+
+ if (!reg_caps || !wmi_ext_reg_cap)
+ return -EINVAL;
+
+ if (phy_idx >= reg_caps->num_phy)
+ return -EINVAL;
+
+ ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
+
+ param->phy_id = ext_reg_cap->phy_id;
+ param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
+ param->eeprom_reg_domain_ext =
+ ext_reg_cap->eeprom_reg_domain_ext;
+ param->regcap1 = ext_reg_cap->regcap1;
+ param->regcap2 = ext_reg_cap->regcap2;
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
+ param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
+ param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
+ param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
+
+ return 0;
+}
+
+static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
+ const void *evt_buf,
+ struct ath11k_targ_cap *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath11k_err(ab, "%s: failed by NULL param\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = ev->phy_capability;
+ cap->max_frag_entry = ev->max_frag_entry;
+ cap->num_rf_chains = ev->num_rf_chains;
+ cap->ht_cap_info = ev->ht_cap_info;
+ cap->vht_cap_info = ev->vht_cap_info;
+ cap->vht_supp_mcs = ev->vht_supp_mcs;
+ cap->hw_min_tx_power = ev->hw_min_tx_power;
+ cap->hw_max_tx_power = ev->hw_max_tx_power;
+ cap->sys_cap_info = ev->sys_cap_info;
+ cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+ cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+ cap->max_num_scan_channels = ev->max_num_scan_channels;
+ cap->max_supported_macs = ev->max_supported_macs;
+ cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+ cap->txrx_chainmask = ev->txrx_chainmask;
+ cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+ cap->num_msdu_desc = ev->num_msdu_desc;
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
+static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
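+
+/* Worked example (assuming WMI_SERVICE_BITS_IN_SIZE32 is 4, as implied by
+ * the b0-b3 packing described above): service id 9 is carried in word
+ * i = 9 / 4 = 2 at bit 9 % 4 = 1, so it is enabled when
+ * wmi_svc_bm[2] & BIT(1) is non-zero.
+ */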
+
+static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_ready_parse *svc_ready = data;
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath11k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
+ frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->desc_id = buf_id;
+ cmd->chanfreq = 0;
+ cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->frame_len = frame->len;
+ cmd->buf_len = buf_len;
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_vdev_txrx_streams *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+	/* It can be optimized by sending tx/rx chain configuration
+ * only for supported bands instead of always sending it for
+ * both the bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->if_id;
+ cmd->vdev_type = param->type;
+ cmd->vdev_subtype = param->subtype;
+ cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+ cmd->pdev_id = param->pdev_id;
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_5GHZ].rx;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ param->if_id, param->type, param->subtype,
+ macaddr, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = arg->channel.freq;
+ chan->band_center_freq1 = arg->channel.band_center_freq1;
+ if (arg->channel.mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq2 = arg->channel.band_center_freq2;
+ else
+ chan->band_center_freq2 = 0;
+
+ chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+ if (arg->channel.passive)
+ chan->info |= WMI_CHAN_INFO_PASSIVE;
+ if (arg->channel.allow_ibss)
+ chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+ if (arg->channel.allow_ht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (arg->channel.allow_vht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ if (arg->channel.allow_he)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+ if (arg->channel.ht40plus)
+ chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+ if (arg->channel.chan_radar)
+ chan->info |= WMI_CHAN_INFO_DFS;
+ if (arg->channel.freq2_radar)
+ chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ arg->channel.max_power) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ arg->channel.max_reg_power);
+
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ arg->channel.max_antenna_gain) |
+ FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ arg->channel.max_power);
+}
+
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_START_REQUEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->beacon_interval = arg->bcn_intval;
+ cmd->bcn_tx_rate = arg->bcn_tx_rate;
+ cmd->dtim_period = arg->dtim_period;
+ cmd->num_noa_descriptors = arg->num_noa_descriptors;
+ cmd->preferred_rx_streams = arg->pref_rx_streams;
+ cmd->preferred_tx_streams = arg->pref_tx_streams;
+ cmd->cac_duration_ms = arg->cac_duration_ms;
+ cmd->regdomain = arg->regdomain;
+ cmd->he_ops = arg->he_ops;
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = arg->ssid_len;
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+ }
+
+ cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath11k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*chan) - TLV_HDR_SIZE);
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ if (restart)
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->channel.freq, arg->channel.mode);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->vdev_assoc_id = aid;
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
+ cmd->peer_type = param->peer_type;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer create vdev_id %d peer_addr %pM\n",
+ param->vdev_id, param->peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->reg_domain = param->current_rd_in_use;
+ cmd->reg_domain_2g = param->current_rd_2g;
+ cmd->reg_domain_5g = param->current_rd_5g;
+ cmd->conformance_test_limit_2g = param->ctl_2g;
+ cmd->conformance_test_limit_5g = param->ctl_5g;
+ cmd->dfs_domain = param->dfs_domain;
+ cmd->pdev_id = param->pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ param->current_rd_in_use, param->current_rd_2g,
+ param->current_rd_5g, param->dfs_domain, param->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = param->peer_tid_bitmap;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ param->vdev_id, peer_addr, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
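+ /* the REO queue DMA address is passed as two 32-bit halves */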
+ cmd->queue_ptr_lo = lower_32_bits(paddr);
+ cmd->queue_ptr_hi = upper_32_bits(paddr);
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ return ret;
+}
+
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
+ cmd->vdev_id = param->vdev_id;
+ cmd->tid_mask = param->peer_tid_bitmap;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI rx reorder queue remove peer_macaddr %pM vdev_id %d tid_mask 0x%x\n",
+ param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->sta_ps_mode = enable;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev set psmode %d vdev id %d\n",
+ enable, vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->suspend_opt = suspend_opt;
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev suspend pdev_id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev resume pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/* TODO: FW support for this cmd is not available yet.
+ * It can be tested once the command and the corresponding
+ * event are implemented in FW.
+ */
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->req_type = type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = param->param;
+ cmd->value = param->value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
+ param->vdev_id, peer_addr, param->param, param->value);
+
+ return ret;
+}
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param = param;
+ cmd->value = param_value;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set sta ps vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->type = type;
+ cmd->delay_time_ms = delay_time_ms;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->stats_id = param->stats_id;
+ cmd->vdev_id = param->vdev_id;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI request stats 0x%x vdev id %d pdev id %d\n",
+ param->stats_id, param->vdev_id, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->bcn_ctrl_op = bcn_ctrl_op;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+
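+ /* fixed cmd, probe response info, then a byte-array TLV with the 4-byte aligned beacon */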
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->tim_ie_offset = offs->tim_offset;
+ cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->buf_len = bcn->len;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+ int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
+
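+ /* fixed cmd followed by a byte-array TLV holding the key, padded to a u32 boundary */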
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = arg->key_idx;
+ cmd->key_flags = arg->key_flags;
+ cmd->key_cipher = arg->key_cipher;
+ cmd->key_len = arg->key_len;
+ cmd->key_txmic_len = arg->key_txmic_len;
+ cmd->key_rxmic_len = arg->key_rxmic_len;
+
+ if (arg->key_rsc_counter)
+ memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+ sizeof(struct wmi_key_seq_counter));
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+ /* copy only key_len bytes to avoid reading past the caller's key buffer */
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ return ret;
+}
+
+static inline void
+ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct peer_assoc_params *param)
+{
+ cmd->peer_flags = 0;
+
+ if (param->is_wme_set) {
+ if (param->qos_flag)
+ cmd->peer_flags |= WMI_PEER_QOS;
+ if (param->apsd_flag)
+ cmd->peer_flags |= WMI_PEER_APSD;
+ if (param->ht_flag)
+ cmd->peer_flags |= WMI_PEER_HT;
+ if (param->bw_40)
+ cmd->peer_flags |= WMI_PEER_40MHZ;
+ if (param->bw_80)
+ cmd->peer_flags |= WMI_PEER_80MHZ;
+ if (param->bw_160)
+ cmd->peer_flags |= WMI_PEER_160MHZ;
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->stbc_flag)
+ cmd->peer_flags |= WMI_PEER_STBC;
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->ldpc_flag)
+ cmd->peer_flags |= WMI_PEER_LDPC;
+
+ if (param->static_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ if (param->dynamic_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ if (param->spatial_mux_flag)
+ cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ if (param->vht_flag)
+ cmd->peer_flags |= WMI_PEER_VHT;
+ if (param->he_flag)
+ cmd->peer_flags |= WMI_PEER_HE;
+ if (param->twt_requester)
+ cmd->peer_flags |= WMI_PEER_TWT_REQ;
+ if (param->twt_responder)
+ cmd->peer_flags |= WMI_PEER_TWT_RESP;
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (param->auth_flag)
+ cmd->peer_flags |= WMI_PEER_AUTH;
+ if (param->need_ptk_4_way)
+ cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ else
+ cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY;
+ if (param->need_gtk_2_way)
+ cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ /* safe mode bypass the 4-way handshake */
+ if (param->safe_mode_enabled)
+ cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY);
+
+ if (param->is_pmf_enabled)
+ cmd->peer_flags |= WMI_PEER_PMF;
+
+ /* Disable AMSDU for station transmit, if user configures it.
+ * Disable AMSDU for AP transmit to 11n stations, if user
+ * configures it.
+ * if (param->amsdu_disable) Add after FW support
+ */
+
+ /* Target asserts if node is marked HT and all MCS is set to 0.
+ * Mark the node as non-HT if all the MCS rates are disabled through
+ * iwpriv.
+ */
+ if (param->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= ~WMI_PEER_HT;
+}
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct wmi_vht_rate_set *mcs;
+ struct wmi_he_rate_set *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+ sizeof(u32));
+
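+ /* fixed cmd, byte-array TLVs for legacy and HT rates, a VHT rate set and an array of HE rate sets */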
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->peer_new_assoc = param->peer_new_assoc;
+ cmd->peer_associd = param->peer_associd;
+
+ ath11k_wmi_copy_peer_flags(cmd, param);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
+
+ cmd->peer_rate_caps = param->peer_rate_caps;
+ cmd->peer_caps = param->peer_caps;
+ cmd->peer_listen_intval = param->peer_listen_intval;
+ cmd->peer_ht_caps = param->peer_ht_caps;
+ cmd->peer_max_mpdu = param->peer_max_mpdu;
+ cmd->peer_mpdu_density = param->peer_mpdu_density;
+ cmd->peer_vht_caps = param->peer_vht_caps;
+ cmd->peer_phymode = param->peer_phymode;
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+ cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+ cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+ cmd->peer_he_ops = param->peer_he_ops;
+ memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+ sizeof(param->peer_he_cap_phyinfo));
+ memcpy(&cmd->peer_ppet, &param->peer_ppet,
+ sizeof(param->peer_ppet));
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+ memcpy(ptr, param->peer_legacy_rates.rates,
+ param->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+ memcpy(ptr, param->peer_ht_rates.rates,
+ param->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+ cmd->peer_nss = param->peer_nss;
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+ if (param->vht_capable) {
+ mcs->rx_max_rate = param->rx_max_rate;
+ mcs->rx_mcs_set = param->rx_mcs_set;
+ mcs->tx_max_rate = param->tx_max_rate;
+ mcs->tx_mcs_set = param->tx_mcs_set;
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = param->peer_he_mcs_count;
+
+ ptr += sizeof(*mcs);
+
+ len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < param->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_HE_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+ he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ ptr += sizeof(*he_mcs);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, param->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ return ret;
+}
+
+void ath11k_wmi_start_scan_init(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->num_bssid = 1;
+}
+
+static inline void
+ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct scan_req_params *param)
+{
+ /* Scan events subscription */
+ if (param->scan_ev_started)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
+ if (param->scan_ev_completed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
+ if (param->scan_ev_bss_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
+ if (param->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
+ if (param->scan_ev_dequeued)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
+ if (param->scan_ev_preempted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
+ if (param->scan_ev_start_failed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
+ if (param->scan_ev_restarted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
+ if (param->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+ if (param->scan_ev_suspended)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
+ if (param->scan_ev_resumed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ if (param->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+ if (param->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+ if (param->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ if (param->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+ /* for adaptive scan mode using 3 bits (21 - 23 bits) */
+ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+ param->adaptive_dwell_time_mode);
+}
+
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct wmi_ssid *ssid = NULL;
+ struct wmi_mac_addr *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u8 extraie_len_with_pad = 0;
+
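+ /* fixed cmd plus four variable-length TLVs: channel list, SSIDs, BSSIDs and extra IEs */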
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_chan)
+ len += params->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_ssids)
+ len += params->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_bssid)
+ len += sizeof(*bssid) * params->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (params->extraie.len)
+ extraie_len_with_pad =
+ roundup(params->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->scan_id = params->scan_id;
+ cmd->scan_req_id = params->scan_req_id;
+ cmd->vdev_id = params->vdev_id;
+ cmd->scan_priority = params->scan_priority;
+ cmd->notify_scan_events = params->notify_scan_events;
+
+ ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+ cmd->dwell_time_active = params->dwell_time_active;
+ cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+ cmd->dwell_time_passive = params->dwell_time_passive;
+ cmd->min_rest_time = params->min_rest_time;
+ cmd->max_rest_time = params->max_rest_time;
+ cmd->repeat_probe_time = params->repeat_probe_time;
+ cmd->probe_spacing_time = params->probe_spacing_time;
+ cmd->idle_time = params->idle_time;
+ cmd->max_scan_time = params->max_scan_time;
+ cmd->probe_delay = params->probe_delay;
+ cmd->burst_duration = params->burst_duration;
+ cmd->num_chan = params->num_chan;
+ cmd->num_bssid = params->num_bssid;
+ cmd->num_ssids = params->num_ssids;
+ cmd->ie_len = params->extraie.len;
+ cmd->n_probes = params->n_probes;
+
+ ptr += sizeof(*cmd);
+
+ len = params->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ for (i = 0; i < params->num_chan; ++i)
+ tmp_ptr[i] = params->chan_list[i];
+
+ ptr += len;
+
+ len = params->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (params->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < params->num_ssids; ++i) {
+ ssid->ssid_len = params->ssid[i].length;
+ memcpy(ssid->ssid, params->ssid[i].ssid,
+ params->ssid[i].length);
+ ssid++;
+ }
+ }
+
+ ptr += (params->num_ssids * sizeof(*ssid));
+ len = params->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (params->num_bssid) {
+ for (i = 0; i < params->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ params->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += params->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (params->extraie.len)
+ memcpy(ptr, params->extraie.ptr,
+ params->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->requestor = param->requester;
+ cmd->scan_id = param->scan_id;
+ cmd->pdev_id = param->pdev_id;
+ /* stop the scan with the corresponding scan_id */
+ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = WMI_SCAN_STOP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = WMI_SCN_STOP_VAP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath11k_warn(ar->ab, "invalid scan cancel param %d",
+ param->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan_info;
+ struct channel_param *tchan_info;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *reg1, *reg2;
+
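+ /* fixed cmd followed by an array-struct TLV with one wmi_channel per channel */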
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ sizeof(*chan_info) * chan_list->nallchans;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
+ cmd->pdev_id = chan_list->pdev_id;
+ cmd->num_scan_chans = chan_list->nallchans;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * chan_list->nallchans;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ ptr += TLV_HDR_SIZE;
+
+ tchan_info = &chan_list->ch_param[0];
+
+ for (i = 0; i < chan_list->nallchans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+ if (tchan_info->allow_he)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+ else if (tchan_info->allow_vht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ else if (tchan_info->allow_ht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (tchan_info->half_rate)
+ chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+ if (tchan_info->quarter_rate)
+ chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+
+ chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+ tchan_info->phy_mode);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+ tchan_info->minpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ tchan_info->maxpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ tchan_info->maxregpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+ tchan_info->reg_class_id);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ tchan_info->antennamax);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI chan scan list chan[%d] = %u\n",
+ i, chan_info->mhz);
+
+ ptr += sizeof(*chan_info);
+
+ tchan_info++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->wmm_param_type = 0;
+
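+ /* fill one wmi_wmm_params sub-TLV per access category */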
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*wmm_param) - TLV_HDR_SIZE);
+
+ wmm_param->aifs = wmi_wmm_arg->aifs;
+ wmm_param->cwmin = wmi_wmm_arg->cwmin;
+ wmm_param->cwmax = wmi_wmm_arg->cwmax;
+ wmm_param->txoplimit = wmi_wmm_arg->txop;
+ wmm_param->acm = wmi_wmm_arg->acm;
+ wmm_param->no_ack = wmi_wmm_arg->no_ack;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_pktlog_filter_cmd *cmd;
+ struct wmi_pdev_pktlog_filter_info *info;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->num_mac = 1;
+ cmd->enable = enable;
+
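+ /* a single peer filter info TLV follows the fixed cmd */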
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
+
+ ptr += TLV_HDR_SIZE;
+ info = ptr;
+
+ ether_addr_copy(info->peer_macaddr.addr, addr);
+ info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*info) - TLV_HDR_SIZE);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_FILTER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_INIT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ switch (init_cc_params.flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy((u8 *)&cmd->cc_info.alpha2,
+ init_cc_params.cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
+ cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
+ cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+
+out:
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->evlist = pktlog_filter;
+ cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_disable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ cmd->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ cmd->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ cmd->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ cmd->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ cmd->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ cmd->mbss_support = 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = he_obss_pd->enable;
+ cmd->obss_min = he_obss_pd->min_offset;
+ cmd->obss_max = he_obss_pd->max_offset;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+static void
+ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
+ struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+ u8 i;
+ struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ band_to_mac[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+ struct target_resource_config *tg_cfg)
+{
+ wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+ wmi_cfg->num_peers = tg_cfg->num_peers;
+ wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+ wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+ wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+ wmi_cfg->num_tids = tg_cfg->num_tids;
+ wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+ wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+ wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+ wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+ wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+ wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+ wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+ wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+ wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+ wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+ wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+ wmi_cfg->roam_offload_max_ap_profiles =
+ tg_cfg->roam_offload_max_ap_profiles;
+ wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+ wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+ wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+ wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+ wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+ wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+ wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+ wmi_cfg->vow_config = tg_cfg->vow_config;
+ wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+ wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+ wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+ wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+ wmi_cfg->num_tdls_conn_table_entries =
+ tg_cfg->num_tdls_conn_table_entries;
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ tg_cfg->beacon_tx_offload_max_vdev;
+ wmi_cfg->num_multicast_filter_entries =
+ tg_cfg->num_multicast_filter_entries;
+ wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+ wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+ wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ tg_cfg->max_tdls_concurrent_sleep_sta;
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ tg_cfg->max_tdls_concurrent_buffer_sta;
+ wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+ wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+ wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+ wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+ wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+ wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+ wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+ wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+ wmi_cfg->sched_params = tg_cfg->sched_params;
+ wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+ wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+}
+
+static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
+ struct wmi_init_cmd_param *param)
+{
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct wmi_resource_config *cfg;
+ struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+ struct wmi_pdev_band_to_mac *band_to_mac;
+ struct wlan_host_mem_chunk *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ size_t len;
+ int ret;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
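+ /* hw mode and band-to-mac TLVs are included only when a specific hw mode is requested */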
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (param->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
+
+ cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct wlan_host_mem_chunk);
+
+ for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+ host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+ host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
+ param->mem_chunks[idx].req_id,
+ (u64)param->mem_chunks[idx].paddr,
+ param->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = param->num_mem_chunks;
+ len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+ /* emit the host mem chunks array TLV even when num_mem_chunks is zero */
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
+ hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+ hw_mode->hw_mode_index = param->hw_mode_id;
+ hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+ ptr += sizeof(*hw_mode);
+
+ len = param->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < param->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BAND_TO_MAC) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+ band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+ band_to_mac->start_freq =
+ param->band_to_mac[idx].start_freq;
+ band_to_mac->end_freq =
+ param->band_to_mac[idx].end_freq;
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_cmd_init(struct ath11k_base *ab)
+{
+ struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct wmi_init_cmd_param init_param;
+ struct target_resource_config config;
+
+ memset(&init_param, 0, sizeof(init_param));
+ memset(&config, 0, sizeof(config));
+
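+ /* scale vdev/peer/tid counts with the number of radios (single, DBS or DBS_SBS) */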
+ config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config.num_peers = TARGET_NUM_PEERS(DBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config.num_peers = TARGET_NUM_PEERS(SINGLE);
+ config.num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config.num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
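+ /* enable all RF chains reported by the target */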
+ config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config.dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.vow_config = TARGET_VOW_CONFIG;
+ config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config.rx_batchmode = TARGET_RX_BATCHMODE;
+ config.peer_map_unmap_v2_support = 1;
+ config.twt_ap_pdev_count = 2;
+ config.twt_ap_sta_count = 1000;
+
+ memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
+
+ init_param.res_cfg = &wmi_sc->wlan_resource_config;
+ init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
+ init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
+ init_param.mem_chunks = wmi_sc->mem_chunks;
+
+ if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
+ init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ init_param.num_band_to_mac = ab->num_radios;
+
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+
+ return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+ return -ENOBUFS;
+
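+ /* The TLV value points at the hw_mode_id member, so step back to
+ * the enclosing wmi_hw_mode_capabilities struct.
+ */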
+ hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = hw_mode_cap->phy_id_map;
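+ /* phy_id_map is assumed to be a contiguous bitmask starting at
+ * bit 0, so the number of shifts until it clears equals the
+ * number of PHYs behind this hw mode.
+ */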
+ while (phy_map) {
+ svc_rdy_ext->tot_phy_id++;
+ phy_map = phy_map >> 1;
+ }
+
+ return 0;
+}
+
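+ /* Pick the preferred hw mode: ath11k_hw_mode_pri_map assigns a
+ * priority to each hw mode config, lower value meaning higher
+ * priority, and the best mode seen across all capability entries
+ * wins.
+ */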
+static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ while (i < svc_rdy_ext->n_hw_mode_caps) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = hw_mode_caps->hw_mode_id;
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ i++;
+ }
+
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
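+ /* Allocate room for all tot_phy_id entries lazily on the first
+ * TLV; GFP_ATOMIC to match the other allocations done while
+ * parsing events in this file.
+ */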
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
+ sizeof(*svc_rdy_ext->mac_phy_caps),
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath11k_hal_reg_capabilities_ext reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
+ ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+
+ memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
+ &reg_cap, sizeof(reg_cap));
+ }
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+ u32 phy_id_map;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+ svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+ soc->num_radios = 0;
+ phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext->hw_caps,
+ svc_rdy_ext->hw_mode_caps,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->mac_phy_caps,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[soc->num_radios]);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract mac caps, idx: %d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+ return 0;
+}
+
+static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->param);
+ if (ret) {
+ ath11k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+ svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
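+ /* The firmware emits three WMI_TAG_ARRAY_STRUCT TLVs in a fixed
+ * order: hw mode caps, then mac/phy caps, then ext HAL reg caps.
+ * The *_done flags track which of the arrays the current TLV is.
+ */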
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ complete(&ab->wmi_ab.service_ready);
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return ret;
+ }
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+}
+
+static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+ vdev_rsp->vdev_id = ev->vdev_id;
+ vdev_rsp->requestor_id = ev->requestor_id;
+ vdev_rsp->resp_type = ev->resp_type;
+ vdev_rsp->status = ev->status;
+ vdev_rsp->chain_mask = ev->chain_mask;
+ vdev_rsp->smps_mode = ev->smps_mode;
+ vdev_rsp->mac_id = ev->mac_id;
+ vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+ vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+
+ kfree(tb);
+ return 0;
+}
+
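+ /* Unpack the firmware-encoded regulatory rules into an array of
+ * cur_reg_rule entries. The caller owns the returned array and
+ * must kfree() it.
+ */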
+static struct cur_reg_rule
+*create_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ FIELD_GET(REG_RULE_START_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].end_freq =
+ FIELD_GET(REG_RULE_END_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].max_bw =
+ FIELD_GET(REG_RULE_MAX_BW,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].reg_power =
+ FIELD_GET(REG_RULE_REG_PWR,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].ant_gain =
+ FIELD_GET(REG_RULE_ANT_GAIN,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].flags =
+ FIELD_GET(REG_RULE_FLAGS,
+ wmi_reg_rule[count].flag_info);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+ struct wmi_regulatory_rule_struct *wmi_reg_rule;
+ u32 num_2g_reg_rules, num_5g_reg_rules;
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+ if (!chan_list_event_hdr) {
+ ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
+ reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;
+
+ if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
+ ath11k_warn(ab, "No regulatory rules available in the event info\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
+ REG_ALPHA2_LEN);
+ reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+ reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+ reg_info->num_phy = chan_list_event_hdr->num_phy;
+ reg_info->phy_id = chan_list_event_hdr->phy_id;
+ reg_info->ctry_code = chan_list_event_hdr->country_id;
+ reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+ if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
+ reg_info->status_code = REG_SET_CC_STATUS_PASS;
+ else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
+ reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
+ reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
+ reg_info->status_code = REG_SET_CC_STATUS_FAIL;
+
+ reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
+ reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
+ reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
+ reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;
+
+ num_2g_reg_rules = reg_info->num_2g_reg_rules;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
+ __func__, reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2g, reg_info->max_bw_2g,
+ reg_info->min_bw_5g, reg_info->max_bw_5g);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
+ num_2g_reg_rules, num_5g_reg_rules);
+
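+ /* The reg rule array follows the fixed event header plus the TLV
+ * header of the array itself.
+ */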
+ wmi_reg_rule =
+ (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ + sizeof(*chan_list_event_hdr)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2g_reg_rules) {
+ reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_2g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (num_5g_reg_rules) {
+ wmi_reg_rule += num_2g_reg_rules;
+ reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_5g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+ *tx_status = ev->tx_status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct mgmt_rx_event_params *hdr)
+{
+ const void **tb;
+ const struct wmi_mgmt_rx_hdr *ev;
+ const u8 *frame;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_RX_HDR];
+ frame = tb[WMI_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ ath11k_warn(ab, "failed to fetch mgmt rx hdr");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = ev->pdev_id;
+ hdr->channel = ev->channel;
+ hdr->snr = ev->snr;
+ hdr->rate = ev->rate;
+ hdr->phy_mode = ev->phy_mode;
+ hdr->buf_len = ev->buf_len;
+ hdr->status = ev->status;
+ hdr->flags = ev->flags;
+ hdr->rssi = ev->rssi;
+ hdr->tsf_delta = ev->tsf_delta;
+ memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* Shift the sk_buff so its data points at `frame`: reset the
+ * length, extend the tail up to the frame TLV, pull off the
+ * header bytes before it and then expose buf_len bytes of the
+ * management frame itself.
+ */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ ath11k_ce_byte_swap(skb->data, hdr->buf_len);
+
+ kfree(tb);
+ return 0;
+}
+
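+ /* Complete a management frame tx: look the msdu up by its desc_id
+ * in the tx idr, unmap it and report the tx status to mac80211.
+ */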
+static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
+ u32 status)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+ if (!msdu) {
+ ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
+ atomic_dec(&ar->num_pending_mgmt_tx);
+
+ return 0;
+}
+
+static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
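+ /* Scan state machine helpers: each WMI scan event is only valid
+ * in a subset of the states and moves the scan along, e.g.
+ * STARTING -> RUNNING on "started" and RUNNING/ABORTING ->
+ * finished on "completed".
+ */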
+static void ath11k_wmi_event_scan_started(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_RUNNING;
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ /* One suspected reason a scan can complete while still starting
+ * is the firmware failing to deliver all scan events to the
+ * host, e.g. when the transport pipe is full. This has been
+ * observed with spectral scan phyerr events starving the wmi
+ * transport pipe. In such a case the "scan completed" event
+ * should be (and is) ignored by the host, as it may just be the
+ * firmware's scan state machine recovering.
+ */
+ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ break;
+ }
+}
+
+static const char *
+ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
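+ /* Map a center frequency to a flat channel index across all
+ * advertised bands. Note that an unknown frequency yields the
+ * total channel count, so callers are expected to pass valid
+ * frequencies.
+ */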
+static int freq_to_idx(struct ath11k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = ev->key_idx;
+ arg->key_flags = ev->key_flags;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = ev->vdev_id;
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = src->chan_nf;
+ dst->tx_frame_count = src->tx_frame_count;
+ dst->rx_frame_count = src->rx_frame_count;
+ dst->rx_clear_count = src->rx_clear_count;
+ dst->cycle_count = src->cycle_count;
+ dst->phy_err_count = src->phy_err_count;
+ dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = src->comp_queued;
+ dst->comp_delivered = src->comp_delivered;
+ dst->msdu_enqued = src->msdu_enqued;
+ dst->mpdu_enqued = src->mpdu_enqued;
+ dst->wmm_drop = src->wmm_drop;
+ dst->local_enqued = src->local_enqued;
+ dst->local_freed = src->local_freed;
+ dst->hw_queued = src->hw_queued;
+ dst->hw_reaped = src->hw_reaped;
+ dst->underrun = src->underrun;
+ dst->tx_abort = src->tx_abort;
+ dst->mpdus_requed = src->mpdus_requed;
+ dst->tx_ko = src->tx_ko;
+ dst->data_rc = src->data_rc;
+ dst->self_triggers = src->self_triggers;
+ dst->sw_retry_failure = src->sw_retry_failure;
+ dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+ dst->pdev_cont_xretry = src->pdev_cont_xretry;
+ dst->pdev_tx_timeout = src->pdev_tx_timeout;
+ dst->pdev_resets = src->pdev_resets;
+ dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+ dst->phy_underrun = src->phy_underrun;
+ dst->txop_ovf = src->txop_ovf;
+}
+
+static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+ dst->status_rcvd = src->status_rcvd;
+ dst->r0_frags = src->r0_frags;
+ dst->r1_frags = src->r1_frags;
+ dst->r2_frags = src->r2_frags;
+ dst->r3_frags = src->r3_frags;
+ dst->htt_msdus = src->htt_msdus;
+ dst->htt_mpdus = src->htt_mpdus;
+ dst->loc_msdus = src->loc_msdus;
+ dst->loc_mpdus = src->loc_mpdus;
+ dst->oversize_amsdu = src->oversize_amsdu;
+ dst->phy_errs = src->phy_errs;
+ dst->phy_err_drop = src->phy_err_drop;
+ dst->mpdu_errs = src->mpdu_errs;
+}
+
+static void
+ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+ struct ath11k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = src->vdev_id;
+ dst->beacon_snr = src->beacon_snr;
+ dst->data_snr = src->data_snr;
+ dst->num_rx_frames = src->num_rx_frames;
+ dst->num_rts_fail = src->num_rts_fail;
+ dst->num_rts_success = src->num_rts_success;
+ dst->num_rx_err = src->num_rx_err;
+ dst->num_rx_discard = src->num_rx_discard;
+ dst->num_tx_not_acked = src->num_tx_not_acked;
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+ struct ath11k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = src->vdev_id;
+ dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+ dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_stats_event *ev;
+ const void *data;
+ int i, ret;
+ u32 len = skb->len;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_STATS_EVENT];
+ data = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ ath11k_warn(ab, "failed to fetch update stats ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
+ ev->pdev_id,
+ ev->num_pdev_stats, ev->num_vdev_stats,
+ ev->num_bcn_stats);
+
+ stats->pdev_id = ev->pdev_id;
+ stats->stats_id = 0;
+
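+ /* The stats payload is a packed byte array: num_pdev_stats pdev
+ * entries first, then num_vdev_stats vdev entries, then
+ * num_bcn_stats bcn entries, consumed in order with a length
+ * check per entry.
+ */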
+ for (i = 0; i < ev->num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath11k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < ev->num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+ struct ath11k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < ev->num_bcn_stats; i++) {
+ const struct wmi_bcn_stats *src;
+ struct ath11k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_bcn_stats(src, dst);
+ list_add_tail(&dst->list, &stats->bcn);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static void
+ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
+ u8 *vif_macaddr;
+ int i;
+
+ /* VDEV stats include the active VDEVs of other PDEVs as well;
+ * ignore those that are not part of the requested PDEV.
+ */
+ if (!arvif)
+ return;
+
+ vif_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_bcn *bcn,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
+ u8 *vdev_macaddr;
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+ bcn->vdev_id);
+ return;
+ }
+
+ vdev_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vdev_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ const struct ath11k_fw_stats_pdev *pdev;
+ const struct ath11k_fw_stats_vdev *vdev;
+ const struct ath11k_fw_stats_bcn *bcn;
+ size_t num_bcn;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats_id == WMI_REQUEST_PDEV_STAT) {
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath11k_warn(ar->ab, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_VDEV_STAT) {
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list)
+ ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_BCN_STAT) {
+ num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath11k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list)
+ ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
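+ /* NUL-terminate, whether or not the output filled the buffer */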
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
+{
+ /* Try to send pending beacons first; they take priority. */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ return alpha[0] == '0' && alpha[1] == '0';
+}
+
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct cur_regulatory_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx;
+ struct ath11k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
+ if (ret) {
+ ath11k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* If setting the requested country fails, the fw retains the
+ * current regd. Log the failure and return from here.
+ */
+ ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ if (pdev_idx >= ab->num_radios)
+ goto fallback;
+
+ /* Avoid multiple overwrites of the default regd during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with the default regd if a new country
+ * setting was requested, i.e. a default regd was already set
+ * during initialization and the regd coming from this event has
+ * valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * the fw are currently assumed to result from user requests.
+ * Free the previously built regd before assigning the newly
+ * generated regd to ar. kfree() itself handles a NULL pointer.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar are not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * kfree() itself handles a NULL pointer.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fall back to the older regd (by sending the previous country
+ * setting again) if the fw succeeded but we failed to process
+ * the event here. The regdomain should be uniform across driver
+ * and fw. Since the fw has processed the command and returned a
+ * success status, we expect this function to succeed as well.
+ * If it does not, CTRY needs to be reverted at the fw and the
+ * old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event *fixed_param;
+ struct wmi_mac_addr *addr_list;
+ struct ath11k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ fixed_param = (struct wmi_ready_event *)ptr;
+ ab->wlan_init_status = fixed_param->status;
+ rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct wmi_mac_addr *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
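+ /* Only consume the extra MAC address list if there are multiple
+ * radios and the firmware provided at least one address per
+ * radio.
+ */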
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+
+ if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath11k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ /* TODO: Do we need to validate whether ath11k_peer_find()
+ * returns NULL? And why is this needed at all when there is an
+ * HTT event for peer delete?
+ */
+}
+
+static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath11k *ar;
+ u32 status;
+
+ if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath11k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = vdev_start_resp.status;
+
+ if (WARN_ON_ONCE(status)) {
+ ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath11k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ u32 vdev_id, tx_status;
+
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath11k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+}
+
+static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct mgmt_rx_event_params rx_ev = {0};
+ struct ath11k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+ status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management
+ * frames via WMI, while it can deliver some extra via HTT.
+ * Since there can be duplicates, split the reporting wrt
+ * monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+ * Don't clear that. Also, FW delivers broadcast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ /* TODO: Pending handle beacon implementation
+ *if (ieee80211_is_beacon(hdr->frame_control))
+ * ath11k_mac_handle_beacon(ar, skb);
+ */
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+exit:
+ rcu_read_unlock();
+}
+
+static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath11k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, e.g. during interface teardown,
+ * the interface will not be found among the active interfaces.
+ * In such scenarios, iterate over the active pdevs instead and
+ * pick the 'ar' whose scan is ABORTING and whose aborting scan's
+ * vdev id matches this event's info.
+ */
+ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
+ ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
+ else
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+ scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+ scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+ ath11k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (scan_ev.event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath11k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath11k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath11k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath11k_warn(ab, "received scan start failure event\n");
+ ath11k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath11k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath11k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath11k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+ ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (roam_ev.reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ /* TODO: Pending beacon miss and connection_loss_work
+ * implementation
+ * ath11k_mac_handle_beacon_miss(ar, vdev_id);
+ */
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath11k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counters frequency value.
+ */
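+ /* Note: mac_clk_mhz * 1000 is a value in kHz rather than Hz; dividing
+ * the cycle counters by a kHz clock below yields times in ms, which is
+ * the unit survey results are expressed in (our reading of the units,
+ * not spelled out in the original).
+ */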
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = ch_info_ev.noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
+ survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath11k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
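+ /* Firmware reports each 64-bit cycle counter split into two u32
+ * halves; stitch them back together before use.
+ */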
+ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
+ bss_ch_info_ev.rx_clear_count_low;
+
+ total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
+ bss_ch_info_ev.cycle_count_low;
+
+ tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
+ bss_ch_info_ev.tx_cycle_count_low;
+
+ rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_cycle_count_low;
+
+ rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_bss_cycle_count_low;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, bss_ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = bss_ch_info_ev.noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath11k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath11k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_service_available_event *ev;
+ int ret;
+ int i, j;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch svc available ev");
+ kfree(tb);
+ return;
+ }
+
+ /* TODO: Use wmi_service_segment_offset information to get the service,
+ * especially when more services are advertised in multiple service
+ * available events.
+ */
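+ /* Each u32 in wmi_service_segment_bitmap carries 32 service flags.
+ * Extended service j (numbering starts at WMI_MAX_SERVICE) is bit
+ * (j % 32) of word i, so the first extended service sits at bit
+ * (WMI_MAX_SERVICE % 32) of word 0.
+ */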
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
+ ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+
+ kfree(tb);
+}
+
+static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath11k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+}
+
+static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ ath11k_debug_fw_stats_process(ab, skb);
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned
+ * frequency is not part of the BDF CTL (Conformance Test Limits) table
+ * entries.
+ */
+static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev ctl failsafe check ev status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1, FW will cap the transmit power
+ * at 10 dBm; otherwise the CTL power entry in the BDF is picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath11k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
+ const struct wmi_pdev_csa_switch_ev *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath11k_vif *arvif;
+
+ /* Finish CSA once the switch count becomes zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ev->num_vdevs; i++) {
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
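+ /* Only finalize the channel switch on interfaces that are up
+ * and actually have a CSA in progress.
+ */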
+ if (arvif->is_up && arvif->vif->csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_csa_switch_ev *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_radar_ev *ev;
+ struct ath11k *ar;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
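+ /* The event id lives in bits 0-23 of cmd_id (WMI_CMD_HDR_CMD_ID is
+ * GENMASK(23, 0) in wmi.h).
+ */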
+ id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath11k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath11k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath11k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath11k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath11k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath11k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath11k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath11k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath11k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath11k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath11k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath11k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath11k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath11k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath11k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath11k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath11k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath11k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath11k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ /* add Unsupported events here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_TWT_ENABLE_EVENTID:
+ case WMI_TWT_DISABLE_EVENTID:
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ /* TODO: Add remaining events */
+ default:
+ ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+
+ return 0;
+}
+
+static int
+ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * ut_cmd.num_args;
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
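+ /* Buffer layout: the fixed unit test TLV, then a TLV header for the
+ * u32 argument array, then the arguments themselves.
+ */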
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < ut_cmd.num_args; i++)
+ ut_cmd_args[i] = test_args[i];
+
+ /* Log before sending: on failure the skb (and thus cmd) is freed
+ * below, so it must not be dereferenced afterwards.
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI unit test : module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_simulate_radar(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we could pass segment_id (b0 - b1), chirp (b2) and
+ * freq offset (b3 - b10) to the unit test. For simulation
+ * purposes this can be set to 0, which is valid.
+ */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = arvif->vdev_id;
+ wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
+ wmi_ut.num_args = DFS_MAX_TEST_ARGS;
+ wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath11k_wmi_connect(struct ath11k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+ int status;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > MAX_RADIOS)
+ return -EINVAL;
+
+ for (i = 0; i < wmi_ep_count; i++) {
+ status = ath11k_connect_pdev_htc_service(ab, i);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi_handle;
+
+ if (pdev_id >= MAX_RADIOS)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath11k_wmi_attach(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath11k_wmi_detach(struct ath11k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath11k_wmi_pdev_detach(ab, i);
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
new file mode 100644
index 000000000000..1fde15c762ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -0,0 +1,4764 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_WMI_H
+#define ATH11K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+struct ath11k_base;
+struct ath11k;
+struct ath11k_fw_stats;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set Packet extension values, which can be 0 usec, 8 usec or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
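+/* Commands and events share the numbering scheme: the first ID in a
+ * group is ((grp_id) << 12) | 0x1, so e.g. WMI_GRP_SCAN (0x3) starts
+ * its command IDs at 0x3001 and later entries follow sequentially.
+ */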
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ u32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G and
+ * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to select the preferred HW mode
+ * from the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
+
+enum {
+ WMI_HOST_WLAN_2G_CAP = 0x1,
+ WMI_HOST_WLAN_5G_CAP = 0x2,
+ WMI_HOST_WLAN_2G_5G_CAP = 0x3,
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+#define WMI_CMD_UNSUPPORTED 0
+
+enum wmi_tlv_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_TX_OFDMA_CPLEN,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+};
+
+/* Enum list of TLV tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_MAX
+};
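+
+/* Illustrative sketch only: how one of the tags above might end up in a
+ * TLV header word. The field layout assumed here (tag in the upper 16
+ * bits, byte length in the lower 16) and the helper name are assumptions
+ * for the example, not the definitive encoding; FIELD_PREP() is the usual
+ * helper from <linux/bitfield.h>.
+ */
+static inline u32 ath11k_wmi_example_tlv_hdr(enum wmi_tlv_tag tag, u32 len)
+{
+ return FIELD_PREP(GENMASK(31, 16), tag) | FIELD_PREP(GENMASK(15, 0), len);
+}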
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+
+ WMI_MAX_EXT_SERVICE
+};
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G 0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G 1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+#define WMI_PEER_MIMO_PS_STATE 0x1
+#define WMI_PEER_AMPDU 0x2
+#define WMI_PEER_AUTHORIZE 0x3
+#define WMI_PEER_CHWIDTH 0x4
+#define WMI_PEER_NSS 0x5
+#define WMI_PEER_USE_4ADDR 0x6
+#define WMI_PEER_MEMBERSHIP 0x7
+#define WMI_PEER_USERPOS 0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED 0x9
+#define WMI_PEER_TX_FAIL_CNT_THR 0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S 0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH 0xC
+#define WMI_PEER_PHYMODE 0xD
+#define WMI_PEER_USE_FIXED_PWR 0xE
+#define WMI_PEER_PARAM_FIXED_RATE 0xF
+#define WMI_PEER_SET_MU_WHITELIST 0x10
+#define WMI_PEER_SET_MAX_TX_RATE 0x11
+#define WMI_PEER_SET_MIN_TX_RATE 0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+struct wmi_host_pdev_band_to_mac {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath11k_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath11k_service_ext_param {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath11k_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath11k_hw_mode_caps {
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH11K_11B_SUPPORT BIT(0)
+#define ATH11K_11G_SUPPORT BIT(1)
+#define ATH11K_11A_SUPPORT BIT(2)
+#define ATH11K_11N_SUPPORT BIT(3)
+#define ATH11K_11AC_SUPPORT BIT(4)
+#define ATH11K_11AX_SUPPORT BIT(5)
+
+struct ath11k_hal_reg_capabilities_ext {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct wlan_host_mem_chunk {
+ u32 tlv_header;
+ u32 req_id;
+ u32 ptr;
+ u32 size;
+} __packed;
+
+struct wmi_host_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
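+
+/* Illustrative sketch (hypothetical helper) of translating a host-side
+ * memory chunk into the over-the-wire form above. It assumes the firmware
+ * consumes the lower 32 bits of the DMA address in 'ptr', which may not
+ * hold for all targets; tlv_header encoding is omitted. lower_32_bits()
+ * is the standard kernel helper.
+ */
+static inline void
+ath11k_wmi_example_fill_mem_chunk(struct wlan_host_mem_chunk *dst,
+ const struct wmi_host_mem_chunk *src)
+{
+ dst->req_id = src->req_id;
+ dst->ptr = lower_32_bits(src->paddr);
+ dst->size = src->len;
+}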
+
+struct wmi_init_cmd_param {
+ u32 tlv_header;
+ struct target_resource_config *res_cfg;
+ u8 num_mem_chunks;
+ struct wmi_host_mem_chunk *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct wmi_pdev_band_to_mac {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+} __packed;
+
+struct wmi_pdev_set_hw_mode_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 hw_mode_index;
+ u32 num_band_to_mac;
+} __packed;
+
+struct wmi_ppe_threshold {
+ u32 numss_m1; /* NSS - 1 */
+ union {
+ u32 ru_count;
+ u32 ru_mask;
+ } __packed;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct wmi_abi_version {
+ u32 abi_version_0;
+ u32 abi_version_1;
+ u32 abi_version_ns_0;
+ u32 abi_version_ns_1;
+ u32 abi_version_ns_2;
+ u32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ u32 tlv_header;
+ struct wmi_abi_version host_abi_vers;
+ u32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_resource_config {
+ u32 tlv_header;
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 flag1;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 alloc_frag_desc_for_data_pkt;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 max_num_dbs_scan_duty_cycle;
+ u32 max_num_group_keys;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+} __packed;
+
+struct wmi_service_ready_event {
+ u32 fw_build_vers;
+ struct wmi_abi_version fw_abi_vers;
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 num_mem_reqs;
+ u32 max_num_scan_channels;
+ u32 hw_bd_id;
+ u32 hw_bd_info[HW_BD_INFO_SIZE];
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+} __packed;
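+
+/* Hypothetical helper, illustrative only: extracting one of the per-band
+ * chain masks from txrx_chainmask using the bit layout documented above.
+ * GENMASK()/FIELD_GET() are the usual <linux/bits.h>/<linux/bitfield.h>
+ * helpers.
+ */
+static inline u8 ath11k_wmi_example_2g_tx_chainmask(u32 txrx_chainmask)
+{
+ return FIELD_GET(GENMASK(7, 0), txrx_chainmask);
+}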
+
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+struct wmi_service_ready_ext_event {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct wmi_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 fw_build_vers_ext;
+ u32 max_nlo_ssids;
+ u32 max_bssid_indicator;
+ u32 he_cap_info_ext;
+} __packed;
+
+struct wmi_soc_mac_phy_hw_mode_caps {
+ u32 num_hw_modes;
+ u32 num_chainmask_tables;
+} __packed;
+
+struct wmi_hw_mode_capabilities {
+ u32 tlv_header;
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+
+struct wmi_mac_phy_capabilities {
+ u32 hw_mode_id;
+ u32 pdev_id;
+ u32 phy_id;
+ u32 supported_flags;
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 max_bw_supported_2g;
+ u32 ht_cap_info_2g;
+ u32 vht_cap_info_2g;
+ u32 vht_supp_mcs_2g;
+ u32 he_cap_info_2g;
+ u32 he_supp_mcs_2g;
+ u32 tx_chain_mask_2g;
+ u32 rx_chain_mask_2g;
+ u32 max_bw_supported_5g;
+ u32 ht_cap_info_5g;
+ u32 vht_cap_info_5g;
+ u32 vht_supp_mcs_5g;
+ u32 he_cap_info_5g;
+ u32 he_supp_mcs_5g;
+ u32 tx_chain_mask_5g;
+ u32 rx_chain_mask_5g;
+ u32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ u32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct wmi_ppe_threshold he_ppet2g;
+ struct wmi_ppe_threshold he_ppet5g;
+ u32 chainmask_table_id;
+ u32 lmac_id;
+ u32 he_cap_info_2g_ext;
+ u32 he_cap_info_5g_ext;
+ u32 he_cap_info_internal;
+} __packed;
+
+struct wmi_hal_reg_capabilities_ext {
+ u32 tlv_header;
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+} __packed;
+
+struct wmi_soc_hal_reg_capabilities {
+ u32 num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
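+
+/* Hypothetical helper sketching how the two-word MAC representation above
+ * can be filled from a plain byte array via the union; assumes ETH_ALEN
+ * from <linux/if_ether.h>.
+ */
+static inline void ath11k_wmi_example_set_mac(struct wmi_mac_addr *wmac,
+ const u8 *mac)
+{
+ memcpy(wmac->addr, mac, ETH_ALEN);
+}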
+
+struct wmi_ready_event {
+ struct wmi_abi_version fw_abi_vers;
+ struct wmi_mac_addr mac_addr;
+ u32 status;
+ u32 num_dscp_table;
+ u32 num_extra_mac_addr;
+ u32 num_total_peers;
+ u32 num_extra_peers;
+} __packed;
+
+struct wmi_service_available_event {
+ u32 wmi_service_segment_offset;
+ u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
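+
+/* Illustrative sketch (the driver's actual lookup may differ) of testing
+ * an extended service id from enum wmi_tlv_service against the segment
+ * bitmap above, assuming 32 service bits per u32 word.
+ */
+static inline bool
+ath11k_wmi_example_ext_service(const struct wmi_service_available_event *ev,
+ u32 service_id)
+{
+ u32 idx = service_id - ev->wmi_service_segment_offset;
+
+ if (idx >= WMI_SERVICE_SEGMENT_BM_SIZE32 * 32)
+ return false;
+
+ return ev->wmi_service_segment_bitmap[idx / 32] & BIT(idx % 32);
+}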
+
+struct ath11k_pdev_wmi {
+ struct ath11k_wmi_base *wmi_ab;
+ enum ath11k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+};
+
+struct vdev_create_params {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+};
+
+struct wmi_vdev_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+ u32 num_cfg_txrx_streams;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_vdev_txrx_streams {
+ u32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+ struct wmi_mac_addr trans_bssid;
+ u32 profile_idx;
+ u32 profile_num;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+
+struct wmi_ssid {
+ u32 ssid_len;
+ u32 ssid[8];
+} __packed;
+
+#define ATH11K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 requestor_id;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u32 flags;
+ struct wmi_ssid ssid;
+ u32 bcn_tx_rate;
+ u32 bcn_txpower;
+ u32 num_noa_descriptors;
+ u32 disable_hw_ack;
+ u32 preferred_tx_streams;
+ u32 preferred_rx_streams;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+#define WMI_MAC_MAX_SSID_LENGTH 32
+
+struct mac_ssid {
+ u8 length;
+ u8 mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+struct wmi_p2p_noa_descriptor {
+ u32 type_count;
+ u32 duration;
+ u32 interval;
+ u32 start_time;
+};
+
+struct channel_param {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+} __packed;
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+};
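+
+/* Illustrative sketch of deriving the WMI_VDEV_START_* flag bits defined
+ * earlier from a vdev start request argument; the helper name is
+ * hypothetical.
+ */
+static inline u32
+ath11k_wmi_example_vdev_start_flags(const struct wmi_vdev_start_req_arg *arg)
+{
+ u32 flags = 0;
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ return flags;
+}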
+
+struct peer_create_params {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct peer_delete_params {
+ u8 vdev_id;
+};
+
+struct peer_flush_params {
+ u32 peer_tid_bitmap;
+ u8 vdev_id;
+};
+
+struct pdev_set_regdomain_params {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct rx_reorder_queue_remove_params {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HECAP_PHYDWORD_0 0
+#define HECAP_PHYDWORD_1 1
+#define HECAP_PHYDWORD_2 2
+
+#define HECAP_PHY_SU_BFER BIT(31)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(22)
+#define HECAP_PHY_UL_MUOFDMA BIT(23)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HECAP_PHYDWORD_0])
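+
+/* Usage sketch, illustrative only: the *_GET() accessors above take the
+ * he_cap_phy_info_2g/5g[] arrays from struct wmi_mac_phy_capabilities.
+ */
+static inline bool ath11k_wmi_example_he_su_bfer(const u32 *hecap_phy)
+{
+ return HECAP_PHY_SUBFMR_GET(hecap_phy);
+}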
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+struct pdev_params {
+ u32 param_id;
+ u32 param_value;
+};
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 queue_ptr_lo;
+ u32 queue_ptr_hi;
+ u32 queue_no;
+ u32 ba_window_size_valid;
+ u32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid_mask;
+} __packed;
+
+struct gpio_config_params {
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+enum wmi_gpio_type {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_gpio_config_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+struct gpio_output_params {
+ u32 gpio_num;
+ u32 set;
+};
+
+struct wmi_gpio_output_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 set;
+};
+
+struct set_fwtest_params {
+ u32 arg;
+ u32 value;
+};
+
+struct wmi_fwtest_set_param_cmd_param {
+ u32 tlv_header;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct wmi_pdev_set_param_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ u32 tlv_header;
+ /* see enum wmi_bss_chan_info_req_type */
+ u32 req_type;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 reg_domain;
+ u32 reg_domain_2g;
+ u32 reg_domain_5g;
+ u32 conformance_test_limit_2g;
+ u32 conformance_test_limit_5g;
+ u32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param_id;
+ u32 param_value;
+} __packed;
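+
+/* Minimal sketch of filling the fixed fields of a peer set-param command
+ * with one of the WMI_PEER_* ids defined earlier (e.g. WMI_PEER_AUTHORIZE).
+ * The tlv_header encoding, peer_macaddr and the send path are omitted;
+ * the helper name is hypothetical.
+ */
+static inline void
+ath11k_wmi_example_fill_peer_param(struct wmi_peer_set_param_cmd *cmd,
+ u32 vdev_id, u32 param_id, u32 value)
+{
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = value;
+}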
+
+struct wmi_peer_flush_tids_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SCAN_MAX_NUM_SSID 10
+#define WLAN_SCAN_MAX_NUM_BSSID 10
+#define WLAN_SCAN_MAX_NUM_CHANNELS 40
+
+#define WLAN_SSID_MAX_LEN 32
+
+struct element_info {
+ u32 len;
+ u8 *ptr;
+};
+
+struct wlan_ssid {
+ u8 length;
+ u8 ssid[WLAN_SSID_MAX_LEN];
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+#define WMI_SCAN_MAX_NUM_SSID 0x0A
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be rejected by some firmware revisions, reported
+ * as a scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ u32 tlv_header;
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 ie_len;
+ u32 n_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ u32 num_vendor_oui;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active_2g;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT 21
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+ ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+ WMI_SCAN_DWELL_MODE_MASK))
+
+struct scan_req_params {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 notify_scan_events;
+ struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct element_info extraie;
+ struct element_info htcap;
+ struct element_info vhtcap;
+};
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH11K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct scan_cancel_param {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 data_len;
+ union {
+ u32 frag_ptr;
+ u32 frag_ptr_lo;
+ };
+ u32 frame_ctrl;
+ u32 dtim_flag;
+ u32 bcn_antenna;
+ u32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct wmi_channel {
+ u32 tlv_header;
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
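+
+/* Illustrative sketch (not part of the original patch; 'chan', 'mode' and
+ * 'max_pwr' are hypothetical): the info/reg_info words above are bitfields
+ * that can be packed with the helpers from <linux/bitfield.h>, e.g.
+ *
+ *	chan->info = FIELD_PREP(WMI_CHAN_INFO_MODE, mode) |
+ *		     WMI_CHAN_INFO_ALLOW_HT | WMI_CHAN_INFO_ALLOW_VHT;
+ *	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, max_pwr) |
+ *			   FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, max_pwr);
+ */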
+
+struct wmi_mgmt_params {
+ void *tx_frame;
+ u16 frm_len;
+ u8 vdev_id;
+ u16 chanfreq;
+ void *pdata;
+ u16 desc_id;
+ u8 *macaddr;
+ void *qdf_ctx;
+};
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH11K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH11K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = assert, 2 = do not respond to the detect command
+ * delay_time_ms: how long the simulated hang is delayed
+ */
+
+struct wmi_force_fw_hang_cmd {
+ u32 tlv_header;
+ u32 type;
+ u32 delay_time_ms;
+};
+
+struct wmi_vdev_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = BIT(0),
+ WMI_REQUEST_AP_STAT = BIT(1),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCNFLT_STAT = BIT(4),
+ WMI_REQUEST_VDEV_RATE_STAT = BIT(5),
+ WMI_REQUEST_INST_STAT = BIT(6),
+ WMI_REQUEST_MIB_STAT = BIT(7),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_CONGESTION_STAT = BIT(9),
+ WMI_REQUEST_PEER_EXTD_STAT = BIT(10),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_BCN_STAT_RESET = BIT(12),
+ WMI_REQUEST_PEER_EXTD2_STAT = BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+ u32 tlv_header;
+ enum wmi_stats_id stats_id;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 pdev_id;
+} __packed;
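+
+/* The stats_id values above are individual bits, so several stats groups can
+ * be requested with one command (illustrative; 'cmd' is hypothetical):
+ *
+ *	cmd->stats_id = WMI_REQUEST_PDEV_STAT | WMI_REQUEST_VDEV_STAT;
+ */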
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+struct wmi_bcn_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 tim_ie_offset;
+ u32 buf_len;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u32 csa_event_bitmap;
+ u32 mbssid_ie_offset;
+ u32 esp_ie_offset;
+} __packed;
+
+struct wmi_key_seq_counter {
+ u32 key_seq_counter_l;
+ u32 key_seq_counter_h;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u32 is_group_key_id_valid;
+ u32 group_key_id;
+
+ /* Followed by key_data: the key itself, then the tx mic,
+ * then the rx mic
+ */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct peer_assoc_params {
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ struct ath11k_ppe_threshold peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ u32 num_peer_legacy_rates;
+ u32 num_peer_ht_rates;
+ u32 peer_bw_rxnss_override;
+ struct wmi_ppe_threshold peer_ppet;
+ u32 peer_he_cap_info;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs;
+ u32 peer_he_cap_info_ext;
+ u32 peer_he_cap_info_internal;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ u32 tlv_header;
+ u32 requestor;
+ u32 scan_id;
+ u32 req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct scan_chan_list_params {
+ u32 pdev_id;
+ u16 nallchans;
+ struct channel_param ch_param[1];
+};
+
+struct wmi_scan_chan_list_cmd {
+ u32 tlv_header;
+ u32 num_scan_chans;
+ u32 flags;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_params {
+ u32 tlv_header;
+ u32 tx_params_dword0;
+ u32 tx_params_dword1;
+};
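+
+/* Illustrative packing sketch ('params', 'tx_power' and 'retry_limit' are
+ * hypothetical): the two dwords are built with the bitfield helpers, e.g.
+ *
+ *	params->tx_params_dword0 =
+ *		FIELD_PREP(WMI_TX_PARAMS_DWORD0_POWER, tx_power) |
+ *		FIELD_PREP(WMI_TX_PARAMS_DWORD0_RETRY_LIMIT, retry_limit);
+ */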
+
+struct wmi_mgmt_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 desc_id;
+ u32 chanfreq;
+ u32 paddr_lo;
+ u32 paddr_hi;
+ u32 frame_len;
+ u32 buf_len;
+ u32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+};
+
+struct wmi_sta_smps_force_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 forced_mode;
+};
+
+struct wmi_sta_smps_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct wmi_bcn_prb_info {
+ u32 tlv_header;
+ u32 caps;
+ u32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+ u32 value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+};
+
+struct ap_ps_params {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct vdev_set_params {
+ u32 if_id;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct stats_request_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 init_cc_type;
+ union {
+ u32 country_code;
+ u32 regdom_id;
+ u32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 filter_type;
+ u32 num_mac;
+} __packed;
+
+enum ath11k_wmi_pktlog_enable {
+ ATH11K_WMI_PKTLOG_ENABLE_AUTO = 0,
+ ATH11K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 evlist; /* WMI_PKTLOG_EVENT */
+ u32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 module_id;
+ u32 num_args;
+ u32 diag_token;
+ /* Followed by test args */
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Placeholder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Needs to be cleaned up.
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
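+
+/* Illustrative composition (hypothetical 'param'): an authenticated,
+ * QoS-capable HT peer on a 40 MHz channel would set
+ *
+ *	param->peer_flags = WMI_PEER_AUTH | WMI_PEER_QOS |
+ *			    WMI_PEER_HT | WMI_PEER_40MHZ;
+ */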
+
+struct beacon_tmpl_params {
+ u8 vdev_id;
+ u32 tim_ie_offset;
+ u32 tmpl_len;
+ u32 tmpl_len_aligned;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u8 *frm;
+};
+
+struct wmi_rate_set {
+ u32 num_rates;
+ u32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+ u32 tlv_header;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u32 tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+ u32 tlv_header;
+ u32 rx_mcs_set;
+ u32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ u32 vdev_id;
+ u32 requestor_id;
+ enum wmi_start_event_param resp_type;
+ u32 status;
+ u32 chain_mask;
+ u32 smps_mode;
+ union {
+ u32 mac_id;
+ u32 pdev_id;
+ };
+ u32 cfgd_tx_streams;
+ u32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+enum cc_setting_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+};
+
+/* Regulatory Rule Flags Passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+};
+
+struct cur_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+};
+
+struct cur_regulatory_info {
+ enum cc_setting_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+ struct cur_reg_rule *reg_rules_2g_ptr;
+ struct cur_reg_rule *reg_rules_5g_ptr;
+};
+
+struct wmi_reg_chan_list_cc_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+};
+
+struct wmi_peer_delete_resp_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ u32 pdev_id;
+ u32 freq; /* Units in MHz */
+ u32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+ u32 rx_clear_count_low;
+ u32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ u32 cycle_count_low;
+ u32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ u32 tx_cycle_count_low;
+ u32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ u32 rx_cycle_count_low;
+ u32 rx_cycle_count_high;
+ /* rx cycle count for my BSS, in 64-bit format */
+ u32 rx_bss_cycle_count_low;
+ u32 rx_bss_cycle_count_high;
+} __packed;
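+
+/* Illustrative derivation (hypothetical 'ev'; assumes all counters tick in
+ * the same clock domain): reassemble the 64-bit counters and express busy
+ * time as a percentage of the measurement period, e.g.
+ *
+ *	u64 cycle = ((u64)ev->cycle_count_high << 32) | ev->cycle_count_low;
+ *	u64 busy = ((u64)ev->rx_clear_count_high << 32) |
+ *		   ev->rx_clear_count_low;
+ *	u8 busy_pct = cycle ? div64_u64(busy * 100, cycle) : 0;
+ */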
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+ s32 chan_nf;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+
+ /* Num Local frames queued */
+ s32 local_enqued;
+
+ /* Num Local frames done */
+ s32 local_freed;
+
+ /* Num queued to HW */
+ s32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+
+ /* Num underruns */
+ s32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+
+ /* Num MPDUs re-queued by SW */
+ s32 mpdus_requed;
+
+ /* excessive retries */
+ u32 tx_ko;
+
+ /* data hw rate code */
+ u32 data_rc;
+
+ /* Scheduler self triggers */
+ u32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ u32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ s32 phy_errs;
+
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats {
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct wmi_bcn_stats {
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+} __packed;
+
+struct wmi_stats_event {
+ u32 stats_id;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ u32 num_mib_stats;
+ u32 pdev_id;
+ u32 num_bcn_stats;
+ u32 num_peer_extd_stats;
+ u32 num_peer_extd2_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ u32 pdev_id;
+ u32 ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+ u32 pdev_id;
+ u32 current_switch_count;
+ u32 num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+ u32 pdev_id;
+ u32 detection_mode;
+ u32 chan_freq;
+ u32 chan_width;
+ u32 detector_id;
+ u32 segment_id;
+ u32 timestamp;
+ u32 is_chirp;
+ s32 freq_offset;
+ s32 sidx;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct mgmt_rx_event_params {
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct wmi_mgmt_rx_hdr {
+ u32 channel;
+ u32 snr;
+ u32 rate;
+ u32 phy_mode;
+ u32 buf_len;
+ u32 status;
+ u32 rssi_ctl[ATH_MAX_ANTENNA];
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u32 rx_tsf_l32;
+ u32 rx_tsf_u32;
+ u32 pdev_id;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+ u32 tlv_header;
+ u32 rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+ u32 desc_id;
+ u32 status;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_scan_event {
+ u32 event_type; /* %WMI_SCAN_EVENT_ */
+ u32 reason; /* %WMI_SCAN_REASON_ */
+ u32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHAN */
+ u32 scan_req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ /* TSF timestamp when the scan event (%WMI_SCAN_EVENT_) completed.
+ * For an AP it is the TSF of the AP vdev.
+ * For a STA in connected state it is the TSF of the AP.
+ * For a STA not connected it is the free-running HW timer.
+ */
+ u32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ u32 vdev_id;
+ u32 reason;
+ u32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 chan_tx_pwr_range;
+ u32 chan_tx_pwr_tp;
+ u32 rx_frame_count;
+ u32 my_bss_rx_cycle_count;
+ u32 rx_11b_mode_data_duration;
+ u32 tx_frame_cnt;
+ u32 mac_clk_mhz;
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_targ_cap {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? \
+ (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
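+
+/* Worked expansion (illustrative): for AC 2 the macro yields
+ *
+ *	WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_DELI) == (1 << 4)
+ *	WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) == (1 << 5)
+ *
+ * matching WMI_STA_PS_UAPSD_AC2_DELIVERY_EN/_TRIGGER_EN below.
+ */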
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* A value greater than one specifies the number of TX attempts per
+ * beacon interval allowed before the STA wakes up
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll frames as necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+ * This includes the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request set in the association request received from it.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from the WME IE in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify the frame types that are considered SIFS
+ * RESP trigger frames
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY 0x8
+#define WMI_CIPHER_AES_GCM 0x9
+#define WMI_CIPHER_AES_GMAC 0xa
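+
+/* Illustrative pairing (hypothetical 'arg'): a pairwise CCMP key would use
+ *
+ *	arg->key_flags = WMI_KEY_PAIRWISE;
+ *	arg->key_cipher = WMI_CIPHER_AES_CCM;
+ */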
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH11K_RC_VERSION_OFFSET 28
+#define ATH11K_RC_PREAMBLE_OFFSET 8
+#define ATH11K_RC_NSS_OFFSET 5
+
+#define ATH11K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH11K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH11K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH11K_RC_PREAMBLE_OFFSET) | \
+ (rate))
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
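+
+/* Worked example (illustrative): HT preamble (2), NSS field 0, rate/MCS 7:
+ *
+ *	ATH11K_HW_RATE_CODE(7, 0, WMI_RATE_PREAMBLE_HT)
+ *		== (1 << 28) | (0 << 5) | (2 << 8) | 7 == 0x10000207
+ */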
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED : RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS : RTS/CTS enabled.
+ * @WMI_USE_CTS2SELF : CTS-to-self protection enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES : Neither rate series uses RTS-CTS.
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES : Only the second rate series uses RTS-CTS.
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES : Only the second rate series uses RTS-CTS,
+ * but if there is a sw retry, both rate
+ * series use RTS-CTS.
+ * @WMI_RTSCTS_ERP : RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES : Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath11k_hal_reg_cap {
+ u32 eeprom_rd;
+ u32 eeprom_rd_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+struct ath11k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+enum ath11k_hw_txrx_mode {
+ ATH11K_HW_TXRX_RAW = 0,
+ ATH11K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH11K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ u32 tlv_header;
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txoplimit;
+ u32 acm;
+ u32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ u32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH11K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH11K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH11K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+} __packed;
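+
+/* Sketch (assumed host-side fill, mirroring the ATH11K_TWT_DEF_* defaults
+ * above; 'cmd' is hypothetical):
+ *
+ *	cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ *	cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ *	cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ */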
+
+struct wmi_twt_disable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ s32 obss_min;
+ s32 obss_max;
+ u32 vdev_id;
+} __packed;
+
+struct target_resource_config {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 atf_config;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+};
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+struct ath11k_wmi_base {
+ struct ath11k_base *ab;
+ struct ath11k_pdev_wmi wmi[MAX_RADIOS];
+ enum ath11k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+ struct target_resource_config wlan_resource_config;
+
+ struct ath11k_targ_cap *targ_cap;
+};
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn);
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable);
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
+int ath11k_wmi_cmd_init(struct ath11k_base *ab);
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
+int ath11k_wmi_connect(struct ath11k_base *ab);
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id);
+int ath11k_wmi_attach(struct ath11k_base *ab);
+void ath11k_wmi_detach(struct ath11k_base *ab);
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param);
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id,
+ const u8 *addr, dma_addr_t paddr,
+ u8 tid, u8 ba_window_size_valid,
+ u32 ba_window_size);
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param);
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms);
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id);
+void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg);
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params);
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param);
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param);
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param);
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param);
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param);
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list);
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id);
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_param);
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param);
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param);
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats);
+size_t ath11k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_peers_extd(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
+int ath11k_wmi_simulate_radar(struct ath11k *ar);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index c0794f5988b3..2c9cec8b53d9 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
- mem = ioremap_nocache(res->start, resource_size(res));
+ mem = ioremap(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 63019c3de034..cdefb8e2daf1 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENXIO;
}
- mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
index 547cd46da260..d0f1e8bcc846 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -406,7 +406,7 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
sram.com_att_6db =
ar9003_aic_find_index(1, fixed_com_att_db);
- sram.valid = 1;
+ sram.valid = true;
sram.rot_dir_att_db =
min(max(rot_dir_path_att_db,
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index fb649d85b8fc..dd0c32379375 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1216,7 +1216,7 @@ err_fw:
static int send_eject_command(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 20f4f8ea9f89..bee9110b91f3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 set, this is not a supported regulatory domain
- * but since we have more than one user with it we need
- * a solution for them. We default to 0x64, which is the
- * default Atheros world regulatory domain.
+ * 0x8000 or 0x0 set, this is not a supported regulatory
+ * domain but since we have more than one user with it we
+ * need a solution for them. We default to 0x64, which is
+ * the default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG)
+ if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index c30fdd0cbf1e..e49c306e0eef 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1169,7 +1169,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 523550f94a3f..77269ac7f352 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -1620,7 +1620,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.beacon_length6 = msg_body.beacon_length + 6;
if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
- wcn36xx_err("Beacon is to big: beacon size=%d\n",
+ wcn36xx_err("Beacon is too big: beacon size=%d\n",
msg_body.beacon_length);
ret = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7d6f14420855..0851d2bede89 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2579,6 +2579,38 @@ wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
return rc;
}
+static int wil_cfg80211_set_multicast_to_unicast(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (wil->multicast_to_unicast == enabled)
+ return 0;
+
+ wil_info(wil, "set multicast to unicast, enabled=%d\n", enabled);
+ wil->multicast_to_unicast = enabled;
+
+ return 0;
+}
+
+static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil->cqm_rssi_thold = rssi_thold;
+
+ rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
+ if (rc)
+ /* reset stored value upon failure */
+ wil->cqm_rssi_thold = 0;
+
+ return rc;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2610,11 +2642,13 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
.update_ft_ies = wil_cfg80211_update_ft_ies,
+ .set_multicast_to_unicast = wil_cfg80211_set_multicast_to_unicast,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 912c4eaf017b..fef10886ca4a 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -11,26 +11,6 @@
#include "wil6210.h"
-static int wil_ethtoolops_begin(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- mutex_lock(&wil->mutex);
-
- wil_dbg_misc(wil, "ethtoolops_begin\n");
-
- return 0;
-}
-
-static void wil_ethtoolops_complete(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- wil_dbg_misc(wil, "ethtoolops_complete\n");
-
- mutex_unlock(&wil->mutex);
-}
-
static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
@@ -39,11 +19,12 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 rx_itr_en, rx_itr_val = 0;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -57,7 +38,11 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
- return 0;
+ ret = 0;
+
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
}
static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
@@ -67,12 +52,14 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct wireless_dev *wdev = ndev->ieee80211_ptr;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* only @rx_coalesce_usecs and @tx_coalesce_usecs supported,
@@ -88,24 +75,26 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
+ ret = 0;
- return 0;
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
out_bad:
wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
cp, sizeof(*cp), false);
+ mutex_unlock(&wil->mutex);
return -EINVAL;
}
static const struct ethtool_ops wil_ethtool_ops = {
- .begin = wil_ethtoolops_begin,
- .complete = wil_ethtoolops_complete,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 06091d8a9e23..3ba5b2550a8c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -762,7 +762,7 @@ int wil_priv_init(struct wil6210_priv *wil)
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
- wil->amsdu_en = 1;
+ wil->amsdu_en = true;
return 0;
@@ -1654,6 +1654,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ down_write(&wil->mem_lock);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
if (test_bit(wil_status_suspending, wil->status))
@@ -1702,6 +1704,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->secured_boot) {
wil_err(wil, "secured boot is not supported\n");
+ up_write(&wil->mem_lock);
return -ENOTSUPP;
}
@@ -1737,6 +1740,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
+ up_write(&wil->mem_lock);
+
if (load_fw) {
wil_unmask_irq(wil);
@@ -1786,6 +1791,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
out:
+ up_write(&wil->mem_lock);
clear_bit(wil_status_resetting, wil->status);
return rc;
}
@@ -1811,9 +1817,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
- down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
- up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -1905,9 +1909,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- down_write(&wil->mem_lock);
rc = wil_reset(wil, false);
- up_write(&wil->mem_lock);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8ebc6d59aa74..bc8c15fb609d 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -10,6 +10,7 @@
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>
@@ -1139,7 +1140,7 @@ static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = 0;
+ txdata->dot1x_open = false;
txdata->enabled = 0;
txdata->idle = 0;
txdata->last_idle = 0;
@@ -1529,6 +1530,35 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
return v;
}
+/* apply multicast to unicast only for ARP, IPv4 and IPv6 packets
+ * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
+ */
+static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+ __be16 ethertype;
+
+ if (!wil->multicast_to_unicast)
+ return false;
+
+ /* multicast to unicast conversion only for some payload */
+ ethertype = eth->h_proto;
+ if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+ ethertype = ethvlan->h_vlan_encapsulated_proto;
+ switch (ethertype) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
@@ -2336,7 +2366,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* in STA mode (ESS), all to same VRING (to AP) */
ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
- if (vif->pbss)
+ if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
/* in pbss, or with multicast-to-unicast enabled, there is
 * no bcast VRING - duplicate skb in all stations VRINGs
 */
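
wil_check_multicast_to_unicast() peeks past a single 802.1Q tag before classifying the frame. An illustrative userspace rendering of the same ethertype extraction; the structs here are simplified stand-ins for the kernel's struct ethhdr and struct vlan_ethhdr:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>                  /* htons()/ntohs() */

#define ETH_P_8021Q 0x8100
#define ETH_P_ARP   0x0806
#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD

struct eth_hdr  { uint8_t dst[6], src[6]; uint16_t proto; };
struct vlan_hdr { uint8_t dst[6], src[6]; uint16_t tpid, tci, proto; };

static bool convertible(const void *frame, size_t len)
{
        uint16_t proto = ((const struct eth_hdr *)frame)->proto;

        /* skip one 802.1Q tag, as the driver does via vlan_ethhdr */
        if (proto == htons(ETH_P_8021Q) && len >= sizeof(struct vlan_hdr))
                proto = ((const struct vlan_hdr *)frame)->proto;

        switch (ntohs(proto)) {
        case ETH_P_ARP:
        case ETH_P_IP:
        case ETH_P_IPV6:
                return true;            /* eligible for mc-to-unicast */
        default:
                return false;
        }
}
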
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 778b63be6a9a..7bfe867c7509 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -869,6 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
u8 data_offset;
struct wil_rx_status_extended *s;
u16 sring_idx = sring - wil->srings;
+ int invalid_buff_id_retry;
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
@@ -882,9 +883,9 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ invalid_buff_id_retry = 0;
while (!buff_id) {
struct wil_rx_status_extended *s;
- int invalid_buff_id_retry = 0;
wil_dbg_txrx(wil,
"buff_id is not updated yet by HW, (swhead 0x%x)\n",
@@ -902,6 +903,11 @@ again:
if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++;
@@ -962,6 +968,11 @@ again:
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
stats->rx_large_frame++;
rxdata->skipping = true;
}
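
The first txrx_edma.c hunk is a scoping fix: invalid_buff_id_retry used to be declared, and therefore re-initialised, inside the while loop, so the retry limit could never trip. A runnable miniature of the bug and the fix:

#include <stdio.h>

#define MAX_RETRY 3

int main(void)
{
        int retry = 0;                  /* fixed: lives outside the loop */

        for (;;) {
                /* int retry = 0;   <- buggy: reset on every iteration */
                if (++retry > MAX_RETRY) {
                        puts("giving up after MAX_RETRY attempts");
                        break;
                }
                puts("retrying");
        }
        return 0;
}
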
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index c744c65225da..c736f7413a35 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -46,7 +46,7 @@
#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
-#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(20)
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
@@ -244,8 +244,8 @@ struct wil_ring_tx_status {
* calculated, Bit1- L4Err - TCP/UDP Checksum Error
* bit 7 : Reserved:1
* bit 8..19 : Flow ID:12 - MSDU flow ID
- * bit 20..21 : MID:2 - The MAC ID
- * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 20 : MID_V:1 - The MAC ID field is valid
+ * bit 21..22 : MID:2 - The MAC ID
* bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
* bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
* bit 25 : BC:1 - The received MPDU is broadcast
@@ -479,7 +479,7 @@ static inline int wil_rx_status_get_mid(void *msg)
return 0; /* use the default MID */
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
- 20, 21);
+ 21, 22);
}
static inline int wil_rx_status_get_error(void *msg)
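
The txrx_edma.h hunks swap the documented positions of MID and MID_V in status word d0: bit 20 is the valid flag and bits 21..22 carry the MAC ID. A standalone sketch of the corrected extraction, using a plain mask-and-shift in place of WIL_GET_BITS:

#include <stdint.h>
#include <stdio.h>

/* inclusive bit range, mirroring WIL_GET_BITS(x, lo, hi) */
#define GET_BITS(x, lo, hi) \
        (((x) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

int main(void)
{
        uint32_t d0 = (1u << 20) | (2u << 21);  /* MID_V set, MID = 2 */

        if (GET_BITS(d0, 20, 20))               /* bit 20: MID_V */
                printf("MID = %u\n", GET_BITS(d0, 21, 22));  /* prints 2 */
        return 0;
}
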
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 97626bfd4dac..5dc881d3c057 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1059,6 +1059,8 @@ struct wil6210_priv {
u32 max_agg_wsize;
u32 max_ampdu_size;
+ u8 multicast_to_unicast;
+ s32 cqm_rssi_thold;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -1148,7 +1150,7 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
*/
static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid)
{
- return (cid >= 0 && cid < wil->max_assoc_sta);
+ return (cid >= 0 && cid < wil->max_assoc_sta && cid < WIL6210_MAX_CID);
}
void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
@@ -1440,4 +1442,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
void update_supported_bands(struct wil6210_priv *wil);
void wil_clear_fw_log_addr(struct wil6210_priv *wil);
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst);
#endif /* __WIL6210_H__ */
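
wil_cid_valid() now bounds the CID against both the firmware-negotiated max_assoc_sta and the compile-time WIL6210_MAX_CID, so an oversized or corrupted runtime limit can no longer index past the statically sized station arrays. A minimal sketch of the double-bound check, with MAX_CID and struct priv as stand-ins:

#include <stdbool.h>

#define MAX_CID 20                      /* compile-time array bound */

struct priv {
        int max_assoc_sta;              /* runtime, firmware-provided */
        int sta_state[MAX_CID];
};

static bool cid_valid(const struct priv *p, int cid)
{
        /* both limits must hold: runtime policy and array size */
        return cid >= 0 && cid < p->max_assoc_sta && cid < MAX_CID;
}
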
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8c831f..89c12cb2aaab 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i, rc;
+ int i;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -62,9 +62,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- rc = wil_mem_access_lock(wil);
- if (rc)
- return rc;
+ down_write(&wil->mem_lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil,
+ "suspend/resume in progress. cannot copy crash dump\n");
+ up_write(&wil->mem_lock);
+ return -EBUSY;
+ }
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +90,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
- wil_mem_access_unlock(wil);
+
+ up_write(&wil->mem_lock);
return 0;
}
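
wil_fw_copy_crash_dump() now takes the writer side first and only then tests the suspend flags: a suspend that begins after the check can no longer race the device-memory reads, and the -EBUSY bail-out releases the lock on its way out. A sketch of that check-under-lock ordering:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/rwsem.h>

static int my_copy_dump(struct rw_semaphore *mem_lock,
                        unsigned long *status, int suspending_bit)
{
        down_write(mem_lock);                    /* lock first ... */
        if (test_bit(suspending_bit, status)) {  /* ... then check */
                up_write(mem_lock);
                return -EBUSY;          /* bus may power down under us */
        }

        /* ... device memory reads are safe here ... */

        up_write(mem_lock);
        return 0;
}
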
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 7a0d934eb271..23e1ed6a9d6d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -196,8 +196,8 @@ const struct fw_map talyn_mb_fw_mapping[] = {
{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
/* DMA OFU 296b */
{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
- /* ucode debug 4k */
- {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* ucode debug 256b */
+ {0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
@@ -476,6 +476,8 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_RBUFCAP_CFG_CMD";
case WMI_TEMP_SENSE_ALL_CMDID:
return "WMI_TEMP_SENSE_ALL_CMDID";
+ case WMI_SET_LINK_MONITOR_CMDID:
+ return "WMI_SET_LINK_MONITOR_CMD";
default:
return "Untracked CMD";
}
@@ -624,6 +626,10 @@ static const char *eventid2name(u16 eventid)
return "WMI_RBUFCAP_CFG_EVENT";
case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
+ case WMI_SET_LINK_MONITOR_EVENTID:
+ return "WMI_SET_LINK_MONITOR_EVENT";
+ case WMI_LINK_MONITOR_EVENTID:
+ return "WMI_LINK_MONITOR_EVENT";
default:
return "Untracked EVENT";
}
@@ -1507,14 +1513,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (vif->fw_stats_ready) {
/* clean old statistics */
vif->fw_stats_tsf = 0;
- vif->fw_stats_ready = 0;
+ vif->fw_stats_ready = false;
}
wil_link_stats_store_basic(vif, payload + hdr_size);
if (!has_next) {
vif->fw_stats_tsf = tsf;
- vif->fw_stats_ready = 1;
+ vif->fw_stats_ready = true;
}
break;
@@ -1529,14 +1535,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (wil->fw_stats_global.ready) {
/* clean old statistics */
wil->fw_stats_global.tsf = 0;
- wil->fw_stats_global.ready = 0;
+ wil->fw_stats_global.ready = false;
}
wil_link_stats_store_global(vif, payload + hdr_size);
if (!has_next) {
wil->fw_stats_global.tsf = tsf;
- wil->fw_stats_global.ready = 1;
+ wil->fw_stats_global.ready = true;
}
break;
@@ -1836,6 +1842,32 @@ fail:
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
+static void
+wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_link_monitor_event *evt = d;
+ enum nl80211_cqm_rssi_threshold_event event_type;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "link monitor event too short %d\n", len);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
+ evt->type, evt->rssi_level, wil->cqm_rssi_thold);
+
+ if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
+ /* ignore */
+ return;
+
+ event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
+ cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
+}
+
/**
* Some events are ignored on purpose; they need not be interpreted as
* "unhandled events"
@@ -1869,6 +1901,7 @@ static const struct {
{WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
{WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
{WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
+ {WMI_LINK_MONITOR_EVENTID, wmi_evt_link_monitor},
};
/*
@@ -3981,3 +4014,46 @@ int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
return 0;
}
+
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct {
+ struct wmi_set_link_monitor_cmd cmd;
+ s8 rssi_thold;
+ } __packed cmd = {
+ .cmd = {
+ .rssi_hyst = rssi_hyst,
+ .rssi_thresholds_list_size = 1,
+ },
+ .rssi_thold = rssi_thold,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_set_link_monitor_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
+ return -EINVAL;
+
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
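
wmi_set_cqm_rssi_config() builds the command as a packed fixed header followed immediately by the single list element, so one sizeof covers the whole wire blob. A compilable sketch of that layout trick; the names here are stand-ins and the WMI transport itself is elided:

#include <stdint.h>
#include <stdio.h>

struct monitor_cmd {                    /* fixed part of the command */
        uint8_t rssi_hyst;
        uint8_t reserved[12];
        uint8_t list_size;
        /* int8_t thresholds[] follows in the wire format */
} __attribute__((packed));

static struct {
        struct monitor_cmd cmd;
        int8_t rssi_thold;              /* the one trailing list entry */
} __attribute__((packed)) blob = {
        .cmd = { .rssi_hyst = 4, .list_size = 1 },
        .rssi_thold = -70,
};

int main(void)
{
        /* header + 1 threshold byte, sent as a single buffer */
        printf("blob is %zu bytes\n", sizeof(blob));
        return 0;
}
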
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6bd4ccee28ab..e3558136e0c4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -192,6 +192,7 @@ enum wmi_command_id {
WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
+ WMI_SET_LINK_MONITOR_CMDID = 0x845,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -1973,6 +1974,7 @@ enum wmi_event_id {
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_LINK_MONITOR_EVENTID = 0x100E,
WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
@@ -2024,6 +2026,7 @@ enum wmi_event_id {
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_SET_LINK_MONITOR_EVENTID = 0x1845,
WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856,
WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
@@ -3312,6 +3315,36 @@ struct wmi_link_maintain_cfg_read_cmd {
__le32 cid;
} __packed;
+/* WMI_SET_LINK_MONITOR_CMDID */
+struct wmi_set_link_monitor_cmd {
+ u8 rssi_hyst;
+ u8 reserved[12];
+ u8 rssi_thresholds_list_size;
+ s8 rssi_thresholds_list[0];
+} __packed;
+
+/* wmi_link_monitor_event_type */
+enum wmi_link_monitor_event_type {
+ WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT = 0x00,
+ WMI_LINK_MONITOR_NOTIF_TX_ERR_EVT = 0x01,
+ WMI_LINK_MONITOR_NOTIF_THERMAL_EVT = 0x02,
+};
+
+/* WMI_SET_LINK_MONITOR_EVENTID */
+struct wmi_set_link_monitor_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_MONITOR_EVENTID */
+struct wmi_link_monitor_event {
+ /* wmi_link_monitor_event_type */
+ u8 type;
+ s8 rssi_level;
+ u8 reserved[2];
+} __packed;
+
/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
struct wmi_link_maintain_cfg_write_done_event {
/* requested connection ID */
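
rssi_thresholds_list[0] above is the legacy GNU zero-length-array idiom for a trailing variable-length list; the C99 flexible array member ([]) expresses the same layout and is what newer kernel code prefers. A small runnable illustration:

#include <stdint.h>
#include <stdlib.h>

struct thresholds {
        uint8_t count;
        int8_t  list[];                 /* C99 flexible array member */
};

int main(void)
{
        struct thresholds *t =
                malloc(sizeof(*t) + 3 * sizeof(t->list[0]));

        if (!t)
                return 1;
        t->count = 3;
        t->list[0] = -70;               /* storage lives past the struct */
        free(t);
        return 0;
}
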
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index db2c3b8d491e..3b2680772f03 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2236,7 +2236,7 @@ static int at76_alloc_urbs(struct at76_priv *priv,
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
at76_dbg(DBG_URB, "%s: NumEndpoints %d ", __func__,
- interface->altsetting[0].desc.bNumEndpoints);
+ interface->cur_altsetting->desc.bNumEndpoints);
ep_in = NULL;
ep_out = NULL;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 4325e91736eb..8b6b657c4b85 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
+static void b43legacy_interrupt_tasklet(unsigned long data)
{
+ struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
- (void (*)(unsigned long))b43legacy_interrupt_tasklet,
+ b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
wldev->__using_pio = true;
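
This b43legacy hunk, like the matching ipw2100/ipw2200/iwlegacy hunks further down, applies the same fix: tasklet_init() expects void (*)(unsigned long), and casting a differently-typed handler to that signature is undefined behaviour and trips CFI. The handler is changed to take the unsigned long and cast the data instead. A sketch of the pattern, with my_dev/my_irq_work as hypothetical stand-ins:

#include <linux/interrupt.h>

struct my_dev {
        struct tasklet_struct isr_tasklet;
};

static void my_irq_work(struct my_dev *dev) { /* hypothetical bottom half */ }

static void my_tasklet(unsigned long data)    /* matches tasklet_init() */
{
        struct my_dev *dev = (struct my_dev *)data;

        my_irq_work(dev);               /* cast the data, not the function */
}

/* at init time:
 *      tasklet_init(&dev->isr_tasklet, my_tasklet, (unsigned long)dev);
 */
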
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 96fd8e2bf773..b684a5b6d904 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -43,6 +43,7 @@
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
+#define SDIO_4359_FUNC2_BLOCKSIZE 256
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -119,7 +120,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_err("enable_irq_wake failed %d\n", ret);
return ret;
}
- sdiodev->irq_wake = true;
+ disable_irq_wake(pdata->oob_irq_nr);
sdio_claim_host(sdiodev->func1);
@@ -178,10 +179,6 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
- if (sdiodev->irq_wake) {
- disable_irq_wake(pdata->oob_irq_nr);
- sdiodev->irq_wake = false;
- }
free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
@@ -903,6 +900,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
+ unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
sdio_claim_host(sdiodev->func1);
@@ -912,7 +910,9 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
goto out;
}
- ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
+ if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
+ f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
+ ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
@@ -969,8 +969,10 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_89359),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1167,6 +1169,10 @@ static int brcmf_ops_sdio_resume(struct device *dev)
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
+ if (sdiodev->wowl_enabled &&
+ sdiodev->settings->bus.sdio.oob_irq_supported)
+ disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+
brcmf_sdiod_freezer_off(sdiodev);
}
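
The bcmsdh.c hunks rebalance the wake-source arming: the OOB IRQ is no longer left wake-enabled from registration onward, so suspend arms it only when WoWLAN is active and resume disarms it again, keeping the enable_irq_wake()/disable_irq_wake() calls paired without the old irq_wake bookkeeping flag. A condensed sketch of that pairing, with the my_* wrappers as stand-ins:

#include <linux/interrupt.h>

static int my_suspend(unsigned int oob_irq, bool wowl_enabled)
{
        if (wowl_enabled)
                enable_irq_wake(oob_irq);   /* armed for the sleep window */
        return 0;
}

static int my_resume(unsigned int oob_irq, bool wowl_enabled)
{
        if (wowl_enabled)
                disable_irq_wake(oob_irq);  /* keep arm/disarm balanced */
        return 0;
}
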
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 5598bbd09b62..a2328d3eee03 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
+#include <uapi/linux/if_arp.h>
#include <brcmu_utils.h>
#include <defs.h>
@@ -619,6 +620,82 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
}
+/**
+ * brcmf_mon_add_vif() - create monitor mode virtual interface
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ */
+static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
+ const char *name)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *ndev;
+ struct brcmf_if *ifp;
+ int err;
+
+ if (cfg->pub->mon_if) {
+ err = -EEXIST;
+ goto err_out;
+ }
+
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_MONITOR);
+ if (IS_ERR(vif)) {
+ err = PTR_ERR(vif);
+ goto err_out;
+ }
+
+ ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, ether_setup);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_free_vif;
+ }
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ ndev->ieee80211_ptr = &vif->wdev;
+ ndev->needs_free_netdev = true;
+ ndev->priv_destructor = brcmf_cfg80211_free_netdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
+ ifp = netdev_priv(ndev);
+ ifp->vif = vif;
+ ifp->ndev = ndev;
+ ifp->drvr = cfg->pub;
+
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+
+ err = brcmf_net_mon_attach(ifp);
+ if (err) {
+ brcmf_err("Failed to attach %s device\n", ndev->name);
+ free_netdev(ndev);
+ goto err_free_vif;
+ }
+
+ cfg->pub->mon_if = ifp;
+
+ return &vif->wdev;
+
+err_free_vif:
+ brcmf_free_vif(vif);
+err_out:
+ return ERR_PTR(err);
+}
+
+static int brcmf_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ brcmf_net_detach(ndev, true);
+
+ cfg->pub->mon_if = NULL;
+
+ return 0;
+}
+
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
@@ -641,9 +718,10 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_add_vif(wiphy, name);
case NL80211_IFTYPE_AP:
wdev = brcmf_ap_add_vif(wiphy, name, params);
break;
@@ -826,9 +904,10 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return -EOPNOTSUPP;
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_del_vif(wiphy, wdev);
case NL80211_IFTYPE_AP:
return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
case NL80211_IFTYPE_P2P_CLIENT:
@@ -5363,6 +5442,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
bool mbss;
+ struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
@@ -5375,7 +5455,8 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
- if (type == NL80211_IFTYPE_AP) {
+ if (type == NL80211_IFTYPE_AP &&
+ brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
mbss = false;
list_for_each_entry(vif_walk, &cfg->vif_list, list) {
if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
@@ -6012,19 +6093,17 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
(void *)roamtrigger, sizeof(roamtrigger));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
- goto roam_setup_done;
- }
roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
(void *)roam_delta, sizeof(roam_delta));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err);
- goto roam_setup_done;
- }
+
+ return 0;
roam_setup_done:
return err;
@@ -6522,6 +6601,9 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #AP <= 1, channels = 1, 2 total
* #AP <= 4, matching BI, channels = 1, 4 total
*
+ * rsdb, no p2p:
+ * #STA <= 2, #AP <= 2, channels = 2, 4 total
+ *
* p2p, no mchan, and mbss:
*
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
@@ -6533,6 +6615,10 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
* #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
* #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, rsdb, and no mbss:
+ * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ * channels = 2, 4 total
*/
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
@@ -6540,13 +6626,16 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
struct ieee80211_iface_limit *c0_limits = NULL;
struct ieee80211_iface_limit *p2p_limits = NULL;
struct ieee80211_iface_limit *mbss_limits = NULL;
- bool mbss, p2p;
- int i, c, n_combos;
+ bool mon_flag, mbss, p2p, rsdb, mchan;
+ int i, c, n_combos, n_limits;
+ mon_flag = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FLAG);
mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+ rsdb = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB);
+ mchan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN);
- n_combos = 1 + !!p2p + !!mbss;
+ n_combos = 1 + !!(p2p && !rsdb) + !!mbss;
combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
if (!combo)
goto err;
@@ -6554,37 +6643,53 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ if (mon_flag)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+ if (p2p)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
c = 0;
i = 0;
- c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag + (p2p ? 2 : 0) + (rsdb || !p2p);
+ c0_limits = kcalloc(n_limits, sizeof(*c0_limits), GFP_KERNEL);
if (!c0_limits)
goto err;
- c0_limits[i].max = 1;
+
+ combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+ if (mon_flag) {
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
if (p2p) {
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
- combo[c].num_different_channels = 2;
- else
- combo[c].num_different_channels = 1;
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
- c0_limits[i].max = 1;
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
+ }
+ if (p2p && rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 5;
+ } else if (p2p) {
+ combo[c].max_interfaces = i;
+ } else if (rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 3;
} else {
- combo[c].num_different_channels = 1;
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = i;
}
- combo[c].max_interfaces = i;
combo[c].n_limits = i;
combo[c].limits = c0_limits;
- if (p2p) {
+ if (p2p && !rsdb) {
c++;
i = 0;
p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
@@ -6607,14 +6712,20 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (mbss) {
c++;
i = 0;
- mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag;
+ mbss_limits = kcalloc(n_limits, sizeof(*mbss_limits),
+ GFP_KERNEL);
if (!mbss_limits)
goto err;
mbss_limits[i].max = 4;
mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ if (mon_flag) {
+ mbss_limits[i].max = 1;
+ mbss_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
combo[c].beacon_int_infra_match = true;
combo[c].num_different_channels = 1;
- combo[c].max_interfaces = 4;
+ combo[c].max_interfaces = 4 + mon_flag;
combo[c].n_limits = i;
combo[c].limits = mbss_limits;
}
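
Each row of the matrix spelled out in the interface-combination comment in this file maps onto one struct ieee80211_iface_combination: a limits array plus channel and interface totals. As a sketch, the "rsdb, no p2p" row (#STA <= 2, #AP <= 2, channels = 2, 4 total) could be written out statically like this:

#include <net/cfg80211.h>

static const struct ieee80211_iface_limit rsdb_limits[] = {
        { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
        { .max = 2, .types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination rsdb_combo = {
        .limits = rsdb_limits,
        .n_limits = ARRAY_SIZE(rsdb_limits),
        .max_interfaces = 4,            /* "4 total" */
        .num_different_channels = 2,    /* "channels = 2" */
};
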
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a795d781b4c5..282d0bc14e8e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -433,11 +433,25 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
{
struct brcmf_chip_priv *ci;
int count;
+ struct brcmf_core *d11core2 = NULL;
+ struct brcmf_core_priv *d11priv2 = NULL;
ci = core->chip;
+ /* special case: reset both D11 cores together */
+ if (core->pub.id == BCMA_CORE_80211) {
+ d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
+ if (d11core2) {
+ brcmf_dbg(INFO, "found two d11 cores, reset both\n");
+ d11priv2 = container_of(d11core2,
+ struct brcmf_core_priv, pub);
+ }
+ }
+
/* must disable first to work for arbitrary current core state */
brcmf_chip_ai_coredisable(core, prereset, reset);
+ if (d11priv2)
+ brcmf_chip_ai_coredisable(d11priv2, prereset, reset);
count = 0;
while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
@@ -449,9 +463,30 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
usleep_range(40, 60);
}
+ if (d11priv2) {
+ count = 0;
+ while (ci->ops->read32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL,
+ 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+ }
+
ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
postreset | BCMA_IOCTL_CLK);
ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ if (d11priv2) {
+ ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
+ }
}
char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
@@ -677,7 +712,6 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
case BRCM_CC_4358_CHIP_ID:
- case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
case BRCM_CC_4371_CHIP_ID:
return 0x180000;
@@ -687,6 +721,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4366_CHIP_ID:
case BRCM_CC_43664_CHIP_ID:
return 0x200000;
+ case BRCM_CC_4359_CHIP_ID:
+ return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
@@ -1109,6 +1145,21 @@ void brcmf_chip_detach(struct brcmf_chip *pub)
kfree(chip);
}
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list) {
+ if (core->pub.id == BCMA_CORE_80211) {
+ if (unit-- == 0)
+ return &core->pub;
+ }
+ }
+ return NULL;
+}
+
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
struct brcmf_chip_priv *chip;
@@ -1357,6 +1408,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
+ case BRCM_CC_4359_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index 7b00f6a59e89..8fa38658e727 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -74,6 +74,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
const struct brcmf_buscore_ops *ops);
void brcmf_chip_detach(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit);
struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
bool brcmf_chip_iscoreup(struct brcmf_core *core);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 85cf96461dde..23627c953a5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -661,6 +661,8 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
goto fail;
}
+ netif_carrier_off(ndev);
+
ndev->priv_destructor = brcmf_cfg80211_free_netdev;
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
return 0;
@@ -671,7 +673,7 @@ fail:
return -EBADE;
}
-static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
{
if (ndev->reg_state == NETREG_REGISTERED) {
if (rtnl_locked)
@@ -684,6 +686,72 @@ static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
}
}
+static int brcmf_net_mon_open(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_MONITOR, &monitor);
+ if (err) {
+ bphy_err(drvr, "BRCMF_C_GET_MONITOR error (%d)\n", err);
+ return err;
+ } else if (monitor) {
+ bphy_err(drvr, "Monitor mode is already enabled\n");
+ return -EEXIST;
+ }
+
+ monitor = 3;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static int brcmf_net_mon_stop(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ monitor = 0;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_mon = {
+ .ndo_open = brcmf_net_mon_open,
+ .ndo_stop = brcmf_net_mon_stop,
+};
+
+int brcmf_net_mon_attach(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct net_device *ndev;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ndev = ifp->ndev;
+ ndev->netdev_ops = &brcmf_netdev_ops_mon;
+
+ err = register_netdevice(ndev);
+ if (err)
+ bphy_err(drvr, "Failed to register %s device\n", ndev->name);
+
+ return err;
+}
+
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
struct net_device *ndev;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 6699637d3bf8..33b2ab3b54b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -210,6 +210,8 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
+int brcmf_net_mon_attach(struct brcmf_if *ifp);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 1c9c74cc958e..5da0dda0d899 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -38,6 +38,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FLAG, "rtap" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
{ BRCMF_FEAT_SAE, "sae" },
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 280a1f6412d4..cda3fc1bab7f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -23,6 +23,7 @@
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
* MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FLAG: firmware flags monitor packets.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
@@ -44,6 +45,7 @@
BRCMF_FEAT_DEF(GSCAN) \
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FLAG) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 0ff6f5212a94..ae4cf4372908 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -49,6 +49,8 @@
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
#define BRCMF_C_GET_REVINFO 98
+#define BRCMF_C_GET_MONITOR 107
+#define BRCMF_C_SET_MONITOR 108
#define BRCMF_C_GET_CURR_RATESET 114
#define BRCMF_C_GET_AP 117
#define BRCMF_C_SET_AP 118
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2bd892df83cc..5e1a11c07551 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -908,7 +908,7 @@ static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
wlh += wlh[1] + 2;
if (entry->send_tim_signal) {
- entry->send_tim_signal = 0;
+ entry->send_tim_signal = false;
wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
wlh[2] = entry->mac_handle;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index e3dd8623be4e..8bb4f1fa790e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -365,7 +365,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
struct brcmf_msgbuf_pktid *pktid;
struct sk_buff *skb;
- if (idx < 0 || idx >= pktids->array_size) {
+ if (idx >= pktids->array_size) {
brcmf_err("Invalid packet id %d (max %d)\n", idx,
pktids->array_size);
return NULL;
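
idx is a u32 here, so the removed idx < 0 test could never be true; it was dead code (and a compiler warning), and the upper-bound check alone is sufficient. The brcmsmac channel hunk below removes the same class of test on a u16. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t idx = (uint32_t)-1;    /* a "negative" input wraps */

        if (idx < 0)                    /* always false for unsigned */
                puts("never printed");
        if (idx >= 1024)                /* the check that matters */
                puts("rejected by the range check");
        return 0;
}
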
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 7ba9f6a68645..1f5deea5a288 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* firmware requires unique mac address for p2pdev interface */
if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
bphy_err(drvr, "discovery vif must be different from primary interface\n");
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto fail;
}
brcmf_p2p_generate_bss_mac(p2p, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f64ce5074a55..5105f62767fb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -78,7 +78,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
-#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
+#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
@@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
return -EINVAL;
}
- devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
- devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
+ devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+ devinfo->tcm = ioremap(bar1_addr, bar1_size);
if (!devinfo->regs || !devinfo->tcm) {
brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 264ad63232f8..f9047db6a11d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -42,6 +42,8 @@
#define DEFAULT_F2_WATERMARK 0x8
#define CY_4373_F2_WATERMARK 0x40
#define CY_43012_F2_WATERMARK 0x60
+#define CY_4359_F2_WATERMARK 0x40
+#define CY_4359_F1_MESBUSYCTRL (CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
#ifdef DEBUG
@@ -614,6 +616,7 @@ BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
+BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
@@ -636,6 +639,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
+ BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
};
@@ -1935,6 +1939,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
+ continue;
}
bus->sdcnt.rx_readahead_cnt++;
if (rd->len != roundup(rd_new.len, 16)) {
@@ -4205,6 +4210,19 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
break;
+ case SDIO_DEVICE_ID_BROADCOM_4359:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
+ CY_4359_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_4359_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_4359_F1_MESBUSYCTRL, &err);
+ break;
default:
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
DEFAULT_F2_WATERMARK, &err);
@@ -4225,6 +4243,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
if (err == 0) {
+ /* Assign bus interface call back */
+ sdiod->bus_if->dev = sdiod->dev;
+ sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
+ sdiod->bus_if->chip = bus->ci->chip;
+ sdiod->bus_if->chiprev = bus->ci->chiprev;
+
/* Allow full data communication using DPC from now on. */
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
@@ -4241,12 +4265,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
sdio_release_host(sdiod->func1);
- /* Assign bus interface call back */
- sdiod->bus_if->dev = sdiod->dev;
- sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
- sdiod->bus_if->chip = bus->ci->chip;
- sdiod->bus_if->chiprev = bus->ci->chiprev;
-
err = brcmf_alloc(sdiod->dev, sdiod->settings);
if (err) {
brcmf_err("brcmf_alloc failed\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 0bd47c119dae..163fd664780a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -178,7 +178,6 @@ struct brcmf_sdio_dev {
bool sd_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
- bool irq_wake; /* irq wake enable flags */
bool sg_support;
uint max_request_size;
ushort max_segment_count;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 06f3c01f10b3..575ed19e9195 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -430,6 +430,7 @@ fail:
usb_free_urb(req->urb);
list_del(q->next);
}
+ kfree(reqs);
return NULL;
}
@@ -1348,7 +1349,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- desc = &intf->altsetting[0].desc;
+ desc = &intf->cur_altsetting->desc;
if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
(desc->bInterfaceSubClass != 2) ||
(desc->bInterfaceProtocol != 0xff)) {
@@ -1361,7 +1362,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
num_of_eps = desc->bNumEndpoints;
for (ep = 0; ep < num_of_eps; ep++) {
- endpoint = &intf->altsetting[0].endpoint[ep].desc;
+ endpoint = &intf->cur_altsetting->endpoint[ep].desc;
endpoint_num = usb_endpoint_num(endpoint);
if (!usb_endpoint_xfer_bulk(endpoint))
continue;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 3f09d89ba922..7f2c15c799d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5408,7 +5408,7 @@ int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
{
u16 chspec = ch20mhz_chspec(channel);
- if (channel < 0 || channel > MAXCHANNEL)
+ if (channel > MAXCHANNEL)
return -EINVAL;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index f43c06569ea1..c4c8f1b62e1e 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGVLIST: ridcode = RID_APLIST; break;
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
case AIROGSTAT: ridcode = RID_STATUS; break;
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
case AIROGSTATSC32: ridcode = RID_STATS; break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
return -EINVAL;
}
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
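
Besides consolidating the CAP_NET_ADMIN test, the airo.c hunk switches the RID buffer from kmalloc() to kzalloc(): the device may fill fewer than RIDSIZE bytes, and the whole buffer is later copied to userspace, so any slack must not carry stale heap contents. A sketch of the infoleak class being closed, with device_read/user_ptr as hypothetical stand-ins:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RIDSIZE 2048

static void device_read(void *buf, size_t len); /* hypothetical */

static long read_rid_to_user(void __user *user_ptr)
{
        void *iobuf = kzalloc(RIDSIZE, GFP_KERNEL); /* zeroed, not kmalloc */
        long ret = 0;

        if (!iobuf)
                return -ENOMEM;

        device_read(iobuf, RIDSIZE);    /* may fill fewer bytes */

        if (copy_to_user(user_ptr, iobuf, RIDSIZE))
                ret = -EFAULT;          /* slack bytes are zeroes, not heap */
        kfree(iobuf);
        return ret;
}
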
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index c4c83ab60cbc..536cd729c086 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+static void ipw2100_irq_tasklet(unsigned long data)
{
+ struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -5833,7 +5834,7 @@ static int ipw2100_close(struct net_device *dev)
/*
* TODO: Fix this function... it's just wrong
*/
-static void ipw2100_tx_timeout(struct net_device *dev)
+static void ipw2100_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6006,7 +6007,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+static void ipw2100_irq_tasklet(unsigned long data);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6136,7 +6137,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw2100_irq_tasklet, (unsigned long)priv);
/* NOTE: We do not start the deferred work for status checks yet */
@@ -6167,7 +6168,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
printk(KERN_WARNING DRV_NAME
- "Error calling ioremap_nocache.\n");
+ "Error calling ioremap.\n");
err = -EIO;
goto fail;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 31e43fc1d12b..5ef6f87a48ac 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(struct ipw_priv *priv)
+static void ipw_irq_tasklet(unsigned long data)
{
+ struct ipw_priv *priv = (struct ipw_priv *)data;
u32 inta, inta_mask, handled = 0;
unsigned long flags;
int rc = 0;
@@ -10677,7 +10678,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
return ret;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 1168055da182..206b43b9dff8 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(struct il_priv *il)
+il3945_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -3401,7 +3402,7 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il3945_irq_tasklet,
+ il3945_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 3664f56f8cbd..d1e17589dbeb 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4343,8 +4343,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(struct il_priv *il)
+il4965_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -6237,7 +6238,7 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il4965_irq_tasklet,
+ il4965_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index d966b29b45ee..348c17ce72f5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il)
u32 gp = _il_rd(il, CSR_EEPROM_GP);
int sz;
int ret;
- u16 addr;
+ int addr;
/* allocate eeprom */
sz = il->cfg->eeprom_size;
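
Widening addr from u16 to int in il_eeprom_init() sidesteps two hazards in the for (addr = 0; addr < sz; addr += 2) walk: a signed/unsigned comparison against the int-typed sz, and, should sz ever reach 0x10000, a 16-bit wrap that would keep the loop from terminating. A runnable miniature:

#include <stdio.h>

int main(void)
{
        int sz = 0x10000;               /* hypothetical EEPROM size */
        int addr;                       /* u16 would wrap 0xFFFE -> 0 */

        for (addr = 0; addr < sz; addr += 2)
                ;                       /* terminates as expected */
        printf("walked %d bytes\n", addr);
        return 0;
}
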
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index b92255b91b72..8a4579bb10d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -77,8 +77,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -104,8 +103,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 2b1ae0cecc83..7140a5f3ea8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -103,8 +103,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -131,8 +130,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -153,8 +151,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -181,8 +178,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 56dc335a788c..a22a830019c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -92,6 +92,7 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -199,7 +200,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
- .trans.csr = &iwl_csr_v1, \
.gp2_reg_addr = 0xa02c68, \
.mon_dram_regs = { \
.write_ptr = { \
@@ -217,7 +217,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
.trans.base_params = &iwl_ax210_base_params, \
- .trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 512, \
@@ -236,24 +235,16 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-const struct iwl_cfg iwl22000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22500,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22500,
- .cdb = true,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22500,
-};
+/*
+ * If the device doesn't support HE, there is no need for that many buffers.
+ * 22000 devices can split multiple frames into a single RB, so fewer are
+ * needed; AX210 devices cannot (but use smaller RBs by default). These
+ * sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+#define IWL_NUM_RBDS_NON_HE 512
+#define IWL_NUM_RBDS_22000_HE 2048
+#define IWL_NUM_RBDS_AX210_HE 4096
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
@@ -266,6 +257,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.tx_with_siso_diversity = true,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
@@ -278,6 +270,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
@@ -290,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
@@ -302,6 +296,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
@@ -314,6 +309,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
@@ -326,6 +322,7 @@ const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
@@ -338,6 +335,7 @@ const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
@@ -350,6 +348,7 @@ const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
@@ -363,6 +362,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650x_2ax_cfg = {
@@ -376,6 +376,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650w_2ax_cfg = {
@@ -389,6 +390,7 @@ const struct iwl_cfg killer1650w_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
/*
@@ -400,48 +402,56 @@ const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
@@ -454,6 +464,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -468,6 +479,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
@@ -482,6 +494,7 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -496,6 +509,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -510,6 +524,7 @@ const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -524,6 +539,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -538,18 +554,21 @@ const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
@@ -562,6 +581,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
@@ -574,6 +594,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
@@ -586,6 +607,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
@@ -598,6 +620,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
@@ -610,6 +633,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_jf = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
@@ -622,6 +646,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
@@ -634,6 +659,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
@@ -646,18 +672,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
@@ -665,6 +694,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
@@ -672,12 +702,23 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
+ .fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index aab4495c6085..3591336dc644 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -75,8 +75,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -125,7 +124,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
- .trans.csr = &iwl_csr_v1,
};
#define IWL_DEVICE_5150 \
@@ -141,8 +139,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 39ea81903dbe..b4a8a6804c39 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -124,8 +124,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -179,8 +178,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -216,8 +214,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -272,8 +269,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -306,8 +302,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -333,8 +328,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
@@ -361,7 +355,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
.led_mode = IWL_LED_BLINK,
- .trans.csr = &iwl_csr_v1,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index deb520aeb3f8..b72993e07fc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -154,8 +154,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .dccm_offset = IWL7000_DCCM_OFFSET, \
- .trans.csr = &iwl_csr_v1
+ .dccm_offset = IWL7000_DCCM_OFFSET
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index b3cc477140c0..280d84fa5cb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -151,8 +151,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.apmg_not_supported = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1
+ .min_umac_error_event_table = 0x800000
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e9155b9b5ee4..379ea788e424 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -138,13 +138,13 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
+ .num_rbds = 512, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.trans.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
@@ -171,6 +171,12 @@ static const struct iwl_tt_params iwl9000_tt_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+};
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
@@ -184,8 +190,10 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
IWL_DEVICE_9000,
};
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
+
const struct iwl_cfg iwl9260_2ac_160_cfg = {
- .name = "Intel(R) Wireless-AC 9260 160MHz",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 4f2789bb3b5b..598ee7315558 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1255,7 +1255,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", cfg->name);
+ pr_err("%s: Cannot allocate network device\n", trans->name);
goto out;
}
@@ -1390,7 +1390,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->trans->hw_rev);
+ priv->trans->name, priv->trans->hw_rev);
if (iwl_trans_start_hw(priv->trans))
goto out_free_hw;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index cd73fc5cfcbb..fd454836adbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
- memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 40fe2d667622..48d375a86d86 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
- int i, n_profiles, tbl_rev;
- int ret = 0;
+ int i, n_profiles, tbl_rev, pos;
+ int ret = 0;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
+ /* the tables start at element 3 */
+ pos = 3;
+ for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
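Hoisting pos out of the loop matters because the profiles sit back to back in the ACPI package: resetting pos to 3 on every iteration would re-read profile 2's data for all profiles. The shape of the fixed loop, as a sketch (error handling omitted):

	pos = 3;				/* tables start at element 3 */
	for (i = 0; i < n_profiles; i++) {
		/* profiles 2..4 land in sar_profiles[1..3] */
		iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
				    &fwrt->sar_profiles[i + 1], enabled);
		pos += ACPI_SAR_TABLE_SIZE;	/* advance to the next table */
	}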
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 7a0fe5adefa5..a0d6802c2715 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -240,7 +240,7 @@ enum iwl_tof_responder_cfg_flags {
};
/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
@@ -258,7 +258,7 @@ enum iwl_tof_responder_cfg_flags {
* @bssid: Current AP BSSID
* @reserved2: reserved
*/
-struct iwl_tof_responder_config_cmd {
+struct iwl_tof_responder_config_cmd_v6 {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 bandwidth;
@@ -274,6 +274,42 @@ struct iwl_tof_responder_config_cmd {
__le16 reserved2;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */
+
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
@@ -403,7 +439,7 @@ enum iwl_initiator_ap_flags {
};
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
@@ -420,7 +456,7 @@ enum iwl_initiator_ap_flags {
* @reserved: For alignment and future use
* @tsf_delta: not in use
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v3 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 bandwidth;
@@ -435,6 +471,72 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
/**
+ * enum iwl_location_frame_format - location frame formats
+ * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy
+ * @IWL_LOCATION_FRAME_FORMAT_HT: HT
+ * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT
+ * @IWL_LOCATION_FRAME_FORMAT_HE: HE
+ */
+enum iwl_location_frame_format {
+ IWL_LOCATION_FRAME_FORMAT_LEGACY,
+ IWL_LOCATION_FRAME_FORMAT_HT,
+ IWL_LOCATION_FRAME_FORMAT_VHT,
+ IWL_LOCATION_FRAME_FORMAT_HE,
+};
+
+/**
+ * enum iwl_location_bw - location bandwidth selection
+ * @IWL_LOCATION_BW_20MHZ: 20MHz
+ * @IWL_LOCATION_BW_40MHZ: 40MHz
+ * @IWL_LOCATION_BW_80MHZ: 80MHz
+ */
+enum iwl_location_bw {
+ IWL_LOCATION_BW_20MHZ,
+ IWL_LOCATION_BW_40MHZ,
+ IWL_LOCATION_BW_80MHZ,
+};
+
+#define HLTK_11AZ_LEN 32
+#define TK_11AZ_LEN 32
+
+#define LOCATION_BW_POS 4
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: the number of FTM pairs in a single burst (1-31)
+ * @num_of_bursts: Recommended value to be sent to the AP. Base-2 exponent
+ * of the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ */
+struct iwl_tof_range_req_ap_entry {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
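Filling the packed byte combines the two enums, e.g. (a sketch, not code from this patch):

	/* illustrative: HE frames on an 80 MHz channel */
	entry->format_bw = IWL_LOCATION_FRAME_FORMAT_HE |
			   (IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS);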
+
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -536,6 +638,38 @@ struct iwl_tof_range_req_cmd_v5 {
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
/**
+ * struct iwl_tof_range_req_cmd_v7 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v3.
+ */
+struct iwl_tof_range_req_cmd_v7 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+
+/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
@@ -565,7 +699,7 @@ struct iwl_tof_range_req_cmd {
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/*
* enum iwl_tof_range_request_status - status of the sent request
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 408798f351c6..1b2b5fa56e19 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -922,21 +922,6 @@ struct iwl_scan_probe_params_v4 {
#define SCAN_MAX_NUM_CHANS_V3 67
/**
- * struct iwl_scan_channel_params_v3
- * @flags: channel flags &enum iwl_scan_channel_flags
- * @count: num of channels in scan request
- * @reserved: for future use and alignment
- * @channel_config: array of explicit channel configurations
- * for 2.4Ghz and 5.2Ghz bands
- */
-struct iwl_scan_channel_params_v3 {
- u8 flags;
- u8 count;
- __le16 reserved;
- struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
-} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
-
-/**
* struct iwl_scan_channel_params_v4
* @flags: channel flags &enum iwl_scan_channel_flags
* @count: num of channels in scan request
@@ -1011,20 +996,6 @@ struct iwl_scan_periodic_parms_v1 {
} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
/**
- * struct iwl_scan_req_params_v11
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v3
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v3
- */
-struct iwl_scan_req_params_v11 {
- struct iwl_scan_general_params_v10 general_params;
- struct iwl_scan_channel_params_v3 channel_params;
- struct iwl_scan_periodic_parms_v1 periodic_params;
- struct iwl_scan_probe_params_v3 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_params_v12
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v4
@@ -1053,18 +1024,6 @@ struct iwl_scan_req_params_v13 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
- * struct iwl_scan_req_umac_v11
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v11 {
- __le32 uid;
- __le32 ooc_priority;
- struct iwl_scan_req_params_v11 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_umac_v12
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f89a9e16a8c0..f1d1fe96fecc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -813,6 +813,7 @@ enum iwl_mac_beacon_flags {
IWL_MAC_BEACON_ANT_A = BIT(9),
IWL_MAC_BEACON_ANT_B = BIT(10),
IWL_MAC_BEACON_ANT_C = BIT(11),
+ IWL_MAC_BEACON_FILS = BIT(12),
};
/**
@@ -820,6 +821,7 @@ enum iwl_mac_beacon_flags {
* @byte_cnt: byte count of the beacon frame.
* @flags: least significant byte for rate code. The most significant byte
* is &enum iwl_mac_beacon_flags.
+ * @short_ssid: Short SSID
* @reserved: reserved
* @template_id: currently equal to the mac context id of the corresponding mac.
* @tim_idx: the offset of the tim IE in the beacon
@@ -831,14 +833,15 @@ enum iwl_mac_beacon_flags {
struct iwl_mac_beacon_cmd {
__le16 byte_cnt;
__le16 flags;
- __le64 reserved;
+ __le32 short_ssid;
+ __le32 reserved;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
-} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index ed90dd104366..91df1ee25dd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -929,7 +929,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ strncpy(dump_info->dev_human_readable, fwrt->trans->name,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
@@ -1230,13 +1230,15 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iter->lmac = 0;
}
- if (!iter->internal_txf)
+ if (!iter->internal_txf) {
for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
iter->fifo_size =
cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
return true;
}
+ iter->fifo--;
+ }
iter->internal_txf = 1;
@@ -2351,9 +2353,6 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
u32 occur, delay;
unsigned long idx;
- if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
tp_id);
@@ -2669,12 +2668,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret = 0;
- /* if the FW crashed or not debug monitor cfg was given, there is
- * no point in changing the recording state
- */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
- (!fwrt->trans->dbg.dest_tlv &&
- fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index ca3b1a461dea..89f74116569d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,31 +320,6 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
-static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
- char *buf, size_t count)
-{
- u32 new_domain;
- int ret;
-
- if (!iwl_trans_fw_running(fwrt->trans))
- return -EIO;
-
- ret = kstrtou32(buf, 0, &new_domain);
- if (ret)
- return ret;
-
- if (new_domain != fwrt->trans->dbg.domains_bitmap) {
- ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
- if (ret)
- return ret;
-
- iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
- NULL);
- }
-
- return count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
@@ -352,7 +327,7 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
fwrt->trans->dbg.domains_bitmap);
}
-FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@@ -360,5 +335,5 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
- FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 994880a83652..90ca5f929cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -251,7 +251,7 @@ struct iwl_fw_dbg {
struct iwl_fw {
u32 ucode_ver;
- char fw_version[ETHTOOL_FWVERS_LEN];
+ char fw_version[64];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index c24575ff0e54..f8c6ed823bc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -69,7 +69,7 @@
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
-#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
+#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -130,14 +130,6 @@ struct iwl_txf_iter_data {
};
/**
- * enum iwl_fw_runtime_status - fw runtime status flags
- * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
- */
-enum iwl_fw_runtime_status {
- STATUS_GEN_ACTIVE_TRIGS,
-};
-
-/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -150,7 +142,6 @@ enum iwl_fw_runtime_status {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
- * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -171,8 +162,6 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
- unsigned long status;
-
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 317eac066082..be6a2bf9ce74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -285,52 +285,6 @@ struct iwl_pwr_tx_backoff {
};
/**
- * struct iwl_csr_params
- *
- * @flag_sw_reset: reset the device
- * @flag_mac_clock_ready:
- * Indicates MAC (ucode processor, etc.) is powered up and can run.
- * Internal resources are accessible.
- * NOTE: This does not indicate that the processor is actually running.
- * NOTE: This does not indicate that device has completed
- * init or post-power-down restore of internal SRAM memory.
- * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
- * SRAM is restored and uCode is in normal operation mode.
- * This note is relevant only for pre 5xxx devices.
- * NOTE: After device reset, this bit remains "0" until host sets
- * INIT_DONE
- * @flag_init_done: Host sets this to put device into fully operational
- * D0 power mode. Host resets this after SW_RESET to put device into
- * low power mode.
- * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
- * to allow host access to device-internal resources. Host must wait for
- * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
- * registers.
- * @flag_val_mac_access_en: mac access is enabled
- * @flag_master_dis: disable master
- * @flag_stop_master: stop master
- * @addr_sw_reset: address for resetting the device
- * @mac_addr0_otp: first part of MAC address from OTP
- * @mac_addr1_otp: second part of MAC address from OTP
- * @mac_addr0_strap: first part of MAC address from strap
- * @mac_addr1_strap: second part of MAC address from strap
- */
-struct iwl_csr_params {
- u8 flag_sw_reset;
- u8 flag_mac_clock_ready;
- u8 flag_init_done;
- u8 flag_mac_access_req;
- u8 flag_val_mac_access_en;
- u8 flag_master_dis;
- u8 flag_stop_master;
- u8 addr_sw_reset;
- u32 mac_addr0_otp;
- u32 mac_addr1_otp;
- u32 mac_addr0_strap;
- u32 mac_addr1_strap;
-};
-
-/**
* struct iwl_cfg_trans - information needed to start the trans
*
* These values cannot be changed when multiple configs are used for a
@@ -348,7 +302,6 @@ struct iwl_csr_params {
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
- const struct iwl_csr_params *csr;
enum iwl_device_family device_family;
u32 umac_prph_offset;
u32 rf_id:1,
@@ -431,6 +384,8 @@ struct iwl_fw_mon_regs {
* @uhb_supported: ultra high band channels supported
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
* supports 256 BA aggregation
+ * @num_rbds: number of receive buffer descriptors to use
+ * (only used for multi-queue capable devices)
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -485,6 +440,7 @@ struct iwl_cfg {
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
+ u16 num_rbds;
u32 min_umac_error_event_table;
u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
@@ -496,12 +452,22 @@ struct iwl_cfg {
const struct iwl_fw_mon_regs mon_smem_regs;
};
-extern const struct iwl_csr_params iwl_csr_v1;
-extern const struct iwl_csr_params iwl_csr_v2;
+#define IWL_CFG_ANY (~0)
+
+struct iwl_dev_info {
+ u16 device;
+ u16 subdevice;
+ const struct iwl_cfg *cfg;
+ const char *name;
+};
/*
* This list declares the config structures for all devices.
*/
+extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const char iwl9260_160_name[];
+extern const char iwl9560_160_name[];
+
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -595,9 +561,6 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
@@ -636,6 +599,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 5ed07e37e3ee..eeaa8cbdddce 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,12 +64,12 @@
* the init done for driver command that configures several system modes
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size
* exponent, the actual size is 2**value, valid sizes are 8-2048.
* The value is four bits long. Maximum valid exponent is 12
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
* default is short format - not supported by the driver)
- * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ * @IWL_CTXT_INFO_RB_SIZE: RB size mask
* (values are IWL_CTXT_INFO_RB_SIZE_*K)
* @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
* @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
@@ -83,12 +83,12 @@
* @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
*/
enum iwl_context_info_flags {
- IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
- IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
- IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
- IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
- IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
- IWL_CTXT_INFO_RB_SIZE_POS = 9,
+ IWL_CTXT_INFO_AUTO_FUNC_INIT = 0x0001,
+ IWL_CTXT_INFO_EARLY_DEBUG = 0x0002,
+ IWL_CTXT_INFO_ENABLE_CDMP = 0x0004,
+ IWL_CTXT_INFO_RB_CB_SIZE = 0x00f0,
+ IWL_CTXT_INFO_TFD_FORMAT_LONG = 0x0100,
+ IWL_CTXT_INFO_RB_SIZE = 0x1e00,
IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
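Turning the *_POS values into masks lets callers use the <linux/bitfield.h> helpers instead of open-coded shifts; a plausible use (sketch, not taken from this patch):

	u32 cb_size = 9;	/* RBD cyclic buffer: 2^9 = 512 entries */
	u32 rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	u32 ctrl_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
			 u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE) |
			 u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);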
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 92d9898ab7c2..cb9e8e189a1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -256,6 +256,7 @@
/* RESET */
#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
@@ -278,11 +279,35 @@
* 4: GOING_TO_SLEEP
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
*/
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
-#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
@@ -379,7 +404,7 @@ enum {
/* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
/*
* UCODE-DRIVER GP (general purpose) mailbox register 1
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index f266647dc08c..4208e720f6e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -290,10 +290,19 @@ void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+ u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
int ret;
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & trans->dbg.domains_bitmap)) {
+ IWL_DEBUG_FW(trans,
+ "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
+ domain, trans->dbg.domains_bitmap);
+ return;
+ }
+
if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
@@ -480,7 +489,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
if (!frag || frag->size || !pages)
return -EIO;
- while (pages) {
+ /*
+ * We try to allocate as many pages as we can, starting with
+ * the requested amount and going down until we can allocate
+ * something. Because of DIV_ROUND_UP(), pages will never go
+ * down to 0 and stop the loop, so stop when pages reaches 1,
+ * which is too small anyway.
+ */
+ while (pages > 1) {
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
&physical,
GFP_KERNEL | __GFP_NOWARN);
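The loop bound works because the retry path (below this hunk) shrinks the request with DIV_ROUND_UP(), which can never reach zero:

	/* illustrative: halving via DIV_ROUND_UP() sticks at 1 */
	pages = DIV_ROUND_UP(pages, 2);	/* 8 -> 4 -> 2 -> 1 -> 1 -> ... */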
@@ -660,7 +676,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
list_for_each_entry(node, hcmd_list, list) {
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
- u32 domain = le32_to_cpu(hcmd->hdr.domain);
u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
struct iwl_host_cmd cmd = {
.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
@@ -668,10 +683,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
.data = { hcmd_data->data, },
};
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
-
iwl_trans_send_cmd(fwrt->trans, &cmd);
}
}
@@ -891,55 +902,17 @@ static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
struct iwl_dbg_tlv_time_point_data *tp)
{
- struct iwl_dbg_tlv_node *node, *tmp;
+ struct iwl_dbg_tlv_node *node;
struct list_head *trig_list = &tp->trig_list;
struct list_head *active_trig_list = &tp->active_trig_list;
- list_for_each_entry_safe(node, tmp, active_trig_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
list_for_each_entry(node, trig_list, list) {
struct iwl_ucode_tlv *tlv = &node->tlv;
- struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
- u32 domain = le32_to_cpu(trig->hdr.domain);
-
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
}
}
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
-{
- int i;
-
- if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
- iwl_fw_flush_dumps(fwrt);
-
- fwrt->trans->dbg.domains_bitmap = new_domain;
-
- IWL_DEBUG_FW(fwrt,
- "WRT: Generating active triggers list, domain 0x%x\n",
- fwrt->trans->dbg.domains_bitmap);
-
- for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
- struct iwl_dbg_tlv_time_point_data *tp =
- &fwrt->trans->dbg.time_point[i];
-
- iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
- }
-
- clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
-
- return 0;
-}
-
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
union iwl_dbg_tlv_tp_data *tp_data,
@@ -1013,7 +986,16 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
- iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
*ini_dest = IWL_FW_INI_LOCATION_INVALID;
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index f18946872569..1360676b3b21 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -105,7 +105,6 @@ void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4096ccf58b07..2d1cb4647c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -493,6 +493,16 @@ static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
}
}
+static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
+{
+ const char *name = drv->firmware_name;
+
+ if (strncmp(name, "iwlwifi-", 8) == 0)
+ name += 8;
+
+ return name;
+}
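The reduced name keeps log and dump output compact while still identifying the exact firmware file; for instance (file name illustrative):

	/* drv->firmware_name == "iwlwifi-Qu-b0-jf-b0-48.ucode" */
	const char *name = iwl_reduced_fw_name(drv);	/* -> "Qu-b0-jf-b0-48.ucode" */

The snprintf() calls below then append it to fw_version.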
+
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
@@ -551,12 +561,12 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
/* Verify size of file vs. image size info in file's header */
@@ -636,12 +646,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
data = ucode->data;
@@ -895,11 +905,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (major >= 35)
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%08x.%u", major, minor, local_comp);
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
else
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u", major, minor, local_comp);
+ "%u.%u.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1647,6 +1659,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
#endif
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
+
ret = iwl_request_firmware(drv, true);
if (ret) {
IWL_ERR(trans, "Couldn't request the fw\n");
@@ -1817,9 +1831,6 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 8836f85afe85..bf673ce5f183 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -611,10 +611,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-#define MQ_RX_TABLE_SIZE 512
-#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
-#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
-#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
+#define RX_POOL_SIZE(rbds) ((rbds) - 1 + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
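The pool now scales with the config's RBD count instead of the fixed 512-entry table, e.g. (sketch):

	/* illustrative: RX pool for a 22000-family HE device (2048 RBDs) */
	size_t pool = RX_POOL_SIZE(IWL_NUM_RBDS_22000_HE);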
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 1b7414bf7bef..2139f0b8f2bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -70,36 +70,6 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
-const struct iwl_csr_params iwl_csr_v1 = {
- .flag_mac_clock_ready = 0,
- .flag_val_mac_access_en = 0,
- .flag_init_done = 2,
- .flag_mac_access_req = 3,
- .flag_sw_reset = 7,
- .flag_master_dis = 8,
- .flag_stop_master = 9,
- .addr_sw_reset = CSR_BASE + 0x020,
- .mac_addr0_otp = 0x380,
- .mac_addr1_otp = 0x384,
- .mac_addr0_strap = 0x388,
- .mac_addr1_strap = 0x38C
-};
-
-const struct iwl_csr_params iwl_csr_v2 = {
- .flag_init_done = 6,
- .flag_mac_clock_ready = 20,
- .flag_val_mac_access_en = 20,
- .flag_mac_access_req = 21,
- .flag_master_dis = 28,
- .flag_stop_master = 29,
- .flag_sw_reset = 31,
- .addr_sw_reset = CSR_BASE + 0x024,
- .mac_addr0_otp = 0x30,
- .mac_addr1_otp = 0x34,
- .mac_addr0_strap = 0x38,
- .mac_addr1_strap = 0x3C
-};
-
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
@@ -506,8 +476,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_init_done));
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -518,8 +487,8 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* and accesses to uCode SRAM.
*/
err = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_mac_clock_ready),
- BIT(cfg_trans->csr->flag_mac_clock_ready),
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (err < 0)
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index ebea3f308b5d..82e5cac23d8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
* @nvm_file: specifies an external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool lar_disable;
bool fw_monitor;
bool disable_11ac;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 1e240a2a8329..bab0999f002c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 2.4GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 5GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160MHz is allowed
+ * for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80MHz is allowed
+ * for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40MHz is forbidden
+ * for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+ REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
+ REG_CAPA_BF_CCD_HIGH_BAND = BIT(1),
+ REG_CAPA_160MHZ_ALLOWED = BIT(2),
+ REG_CAPA_80MHZ_ALLOWED = BIT(3),
+ REG_CAPA_MCS_8_ALLOWED = BIT(4),
+ REG_CAPA_MCS_9_ALLOWED = BIT(5),
+ REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
+ REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+};
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -801,12 +829,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
{
- __le32 mac_addr0 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_strap));
- __le32 mac_addr1 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_strap));
+ __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
+ __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/*
@@ -816,10 +840,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr))
return;
- mac_addr0 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_otp));
- mac_addr1 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_otp));
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
@@ -939,10 +961,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+ u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
bool lar_enabled;
@@ -1022,7 +1045,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
- if (lar_fw_supported && lar_enabled)
+ if (lar_enabled &&
+ fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1062,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
+ u16 cap_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1101,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
(flags & NL80211_RRF_NO_IR))
flags |= NL80211_RRF_GO_CONCURRENT;
+ /*
+ * cap_flags is per regulatory domain, so apply it to every channel
+ */
+ if (ch_idx >= NUM_2GHZ_CHANNELS) {
+ if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_160MHZ;
+ }
+
return flags;
}
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info)
+ u16 geo_info, u16 cap)
{
int ch_idx;
u16 ch_flags;
@@ -1140,7 +1179,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ ch_flags, cap,
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1445,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
- bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@@ -1485,7 +1522,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
- if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+ if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
nvm->lar_enabled = true;
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
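
The nvm-parse changes above thread a single per-domain capability word (the new `cap` parameter) down into the per-channel rule builder. A userspace sketch of that mapping follows; the flag values are illustrative stand-ins for the NL80211_RRF_* and REG_CAPA_* constants, and only the 5GHz-only restriction logic is modeled.

#include <stdint.h>
#include <stdio.h>

#define REG_CAPA_160MHZ_ALLOWED  (1u << 2) /* illustrative values */
#define REG_CAPA_80MHZ_ALLOWED   (1u << 3)
#define REG_CAPA_40MHZ_FORBIDDEN (1u << 7)

#define RRF_NO_HT40   (1u << 0) /* illustrative */
#define RRF_NO_80MHZ  (1u << 1)
#define RRF_NO_160MHZ (1u << 2)

static uint32_t apply_cap_flags(uint32_t flags, uint16_t cap, int is_5ghz)
{
	if (!is_5ghz)
		return flags;
	if (cap & REG_CAPA_40MHZ_FORBIDDEN)
		flags |= RRF_NO_HT40;
	if (!(cap & REG_CAPA_80MHZ_ALLOWED))
		flags |= RRF_NO_80MHZ;
	if (!(cap & REG_CAPA_160MHZ_ALLOWED))
		flags |= RRF_NO_160MHZ;
	return flags;
}

int main(void)
{
	/* domain allows 80MHz but not 160MHz; 40MHz is not forbidden */
	uint16_t cap = REG_CAPA_80MHZ_ALLOWED;
	printf("rule flags: 0x%x\n", apply_cap_flags(0, cap, 1));
	return 0;
}
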
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index b7e1ddf8f177..fb0b385d10fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
*/
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+ u8 tx_chains, u8 rx_chains);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info);
+ u16 geo_info, u16 cap);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 14c8ba23f3b9..1136d9784f9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -411,13 +411,6 @@ enum {
HW_STEP_LOCATION_BITS = 24,
};
-#define AUX_MISC_MASTER1_EN 0xA20818
-enum aux_misc_master1_en {
- AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
-};
-
-#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
-#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
@@ -430,6 +423,9 @@ enum aux_misc_master1_en {
#define UMAG_SB_CPU_1_STATUS 0xA038C0
#define UMAG_SB_CPU_2_STATUS 0xA038C4
#define UMAG_GEN_HW_STATUS 0xA038C8
+#define UREG_UMAC_CURRENT_PC 0xa05c18
+#define UREG_LMAC1_CURRENT_PC 0xa05c1c
+#define UREG_LMAC2_CURRENT_PC 0xa05c20
/* For UMAG_GEN_HW_STATUS reg check */
enum {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 28bdc9a9617e..f91197e4ae40 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -66,7 +66,9 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops)
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd),
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
+ cmd_pool_size, cmd_pool_align,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
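
The iwl_trans_alloc() change above makes the command-pool geometry caller-supplied: each transport passes the size and alignment of its own TX command layout instead of the old hard-coded sizeof(struct iwl_device_cmd). A tiny sketch of the idea, with a made-up command layout for illustration:

#include <stdalign.h>
#include <stdio.h>

struct cmd_hdr { unsigned char cmd, group, seq[2]; };

/* a transport-specific TX command layout (illustrative, not the ABI) */
struct txcmd_gen2 { struct cmd_hdr hdr; unsigned char payload[312]; };

int main(void)
{
	size_t pool_size  = sizeof(struct txcmd_gen2);
	size_t pool_align = alignof(struct txcmd_gen2);

	printf("cmd pool: %zu bytes, %zu-byte aligned\n",
	       pool_size, pool_align);
	return 0;
}
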
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 8cadad7364ac..7b3b1f4c99b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -112,6 +112,8 @@
* 6) Eventually, the free function will be called.
*/
+#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
@@ -193,6 +195,18 @@ struct iwl_device_cmd {
};
} __packed;
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+ struct iwl_cmd_header hdr;
+ u8 payload[];
+} __packed;
+
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
@@ -358,6 +372,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
}
}
+static inline int
+iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
+{
+ switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return 2 * 1024;
+ case IWL_AMSDU_4K:
+ return 4 * 1024;
+ case IWL_AMSDU_8K:
+ return 8 * 1024;
+ case IWL_AMSDU_12K:
+ return 12 * 1024;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
struct iwl_hcmd_names {
u8 cmd_id;
const char *const cmd_name;
@@ -544,7 +576,7 @@ struct iwl_trans_ops {
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue);
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
@@ -839,6 +871,8 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
+ const char *name;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -948,22 +982,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
return trans->ops->dump_data(trans, dump_mask);
}
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_cmd *dev_cmd)
+ struct iwl_device_tx_cmd *dev_cmd)
{
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue)
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -1271,7 +1305,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops);
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
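
The iwl-trans.h hunks above split data frames off into the new, dynamically sized struct iwl_device_tx_cmd and switch the allocator to kmem_cache_zalloc, replacing the later memset in the mvm TX path. A userspace sketch of that shape, with calloc standing in for the kmem cache and an illustrative payload size:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd_hdr { unsigned char cmd, group, seq[2]; };

struct device_tx_cmd {
	struct cmd_hdr hdr;
	unsigned char payload[]; /* sized per transport generation */
};

int main(void)
{
	size_t payload_len = 128; /* illustrative per-generation size */
	struct device_tx_cmd *c =
		calloc(1, sizeof(*c) + payload_len); /* zeroed, like zalloc */
	if (!c)
		return 1;
	c->hdr.cmd = 0x1c; /* illustrative opcode */
	memset(c->payload, 0xab, payload_len);
	printf("tx cmd: %zu bytes total\n", sizeof(*c) + payload_len);
	free(c);
	return 0;
}
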
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 60aff2ecec12..58df25e2fb32 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -154,5 +154,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_USE_NSSN_SYNC 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 43ebb2149b63..8878409d2f07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -989,6 +989,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) {
ret = 1;
@@ -1083,6 +1085,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ieee80211_restart_hw(mvm->hw);
}
}
+
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
}
out_noreset:
mutex_unlock(&mvm->mutex);
@@ -1929,6 +1933,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
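
The d3.c hunks above introduce an explicit IWL_MVM_STATUS_IN_D3 status bit: suspend sets it before reconfiguring power, resume clears it, and (as the power.c hunks later show) the power-command builders test it instead of inspecting which firmware image is loaded. A minimal userspace model of the pattern, with plain bit operations in place of the kernel's atomic set_bit/clear_bit/test_bit:

#include <stdio.h>

enum { STATUS_IN_D3 = 1u << 0 };
static unsigned int mvm_status;

static void suspend(void) { mvm_status |= STATUS_IN_D3; }
static void resume(void)  { mvm_status &= ~STATUS_IN_D3; }
static int in_d3(void)    { return !!(mvm_status & STATUS_IN_D3); }

int main(void)
{
	suspend();
	printf("building power cmd for %s\n", in_d3() ? "D3" : "runtime");
	resume();
	printf("building power cmd for %s\n", in_d3() ? "D3" : "runtime");
	return 0;
}
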
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index aa659162a7c2..190cf15b825c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -752,7 +752,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- mvm->fwrt.trans->cfg->name);
+ mvm->fwrt.trans->name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
mvm->fwrt.dev->bus->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 9f4b117db9d7..f783d6d53b6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -208,10 +208,11 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
-static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- u8 *channel, u8 *bandwidth,
- u8 *ctrl_ch_position)
+static int
+iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *bandwidth,
+ u8 *ctrl_ch_position)
{
u32 freq = peer->chandef.chan->center_freq;
@@ -243,15 +244,54 @@ static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
}
static int
+iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry_v2 *target)
{
int ret;
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
if (ret)
return ret;
@@ -278,18 +318,11 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
-static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period =
cpu_to_le16(peer->ftm.burst_period);
@@ -314,60 +347,166 @@ static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT);
+}
+
+static int
+iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v3 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ /*
+ * Versions 3 and 4 share some common fields, so
+ * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
+ */
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0;
}
-int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request *req)
+static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
+{
+ u32 status;
+ int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
+
+ if (!err && status) {
+ IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
+ status);
+ err = iwl_ftm_range_request_status_to_err(status);
+ }
+
+ return err;
+}
+
+static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
- struct iwl_tof_range_req_cmd cmd;
- bool new_api = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
- u8 num_of_ap;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v5,
+ .len[0] = sizeof(cmd_v5),
};
- u32 status = 0;
- int err, i;
+ u8 i;
+ int err;
- lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- if (mvm->ftm_initiator.req)
- return -EBUSY;
+ for (i = 0; i < cmd_v5.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api) {
- iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
- hcmd.data[0] = &cmd;
- hcmd.len[0] = sizeof(cmd);
- num_of_ap = cmd.num_of_ap;
- } else {
- iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- hcmd.data[0] = &cmd_v5;
- hcmd.len[0] = sizeof(cmd_v5);
- num_of_ap = cmd_v5.num_of_ap;
+ err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
+ if (err)
+ return err;
}
- for (i = 0; i < num_of_ap; i++) {
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v7 cmd_v7;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v7,
+ .len[0] = sizeof(cmd_v7),
+ };
+ u8 i;
+ int err;
+
+ /*
+ * Versions 7 and 8 have the same structure except for the responders
+ * list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
+ */
+ iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
+
+ for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api)
- err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
- else
- err = iwl_mvm_ftm_put_target_v2(mvm, peer,
- &cmd_v5.ap[i]);
+ err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
if (err)
return err;
}
- err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
- if (!err && status) {
- IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
- status);
- err = iwl_ftm_range_request_status_to_err(status);
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ int err;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->ftm_initiator.req)
+ return -EBUSY;
+
+ if (new_api) {
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD);
+
+ if (cmd_ver == 8)
+ err = iwl_mvm_ftm_start_v8(mvm, vif, req);
+ else
+ err = iwl_mvm_ftm_start_v7(mvm, vif, req);
+
+ } else {
+ err = iwl_mvm_ftm_start_v5(mvm, vif, req);
}
if (!err) {
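
The v2 chandef helper added above packs two fields into the single format_bw byte: the frame format in the low bits and the bandwidth selector shifted up by LOCATION_BW_POS. A short sketch of that encoding; the constant values are illustrative, not the firmware ABI.

#include <stdint.h>
#include <stdio.h>

#define LOCATION_BW_POS 4 /* illustrative */

enum { FRAME_FORMAT_LEGACY, FRAME_FORMAT_HT, FRAME_FORMAT_VHT };
enum { BW_20MHZ, BW_40MHZ, BW_80MHZ };

static uint8_t pack_format_bw(uint8_t format, uint8_t bw)
{
	return format | (uint8_t)(bw << LOCATION_BW_POS);
}

int main(void)
{
	/* an 80MHz VHT target */
	uint8_t fb = pack_format_bw(FRAME_FORMAT_VHT, BW_80MHZ);
	printf("format_bw = 0x%02x\n", fb);
	return 0;
}
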
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 1513b8b4062f..834564198409 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,12 +62,72 @@
#include "mvm.h"
#include "constants.h"
+static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
+ u8 *bw, u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bw = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bw = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bw = IWL_TOF_BW_40;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bw = IWL_TOF_BW_80;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
+ u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /*
+ * The command structure is the same for versions 6 and 7 (only the
+ * field interpretation is different), so the same struct can be used
+ * for both cases.
+ */
struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
@@ -76,27 +136,22 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_CONFIG_CMD);
+ int err;
lockdep_assert_held(&mvm->mutex);
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- cmd.bandwidth = IWL_TOF_BW_20_LEGACY;
- break;
- case NL80211_CHAN_WIDTH_20:
- cmd.bandwidth = IWL_TOF_BW_20_HT;
- break;
- case NL80211_CHAN_WIDTH_40:
- cmd.bandwidth = IWL_TOF_BW_40;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- case NL80211_CHAN_WIDTH_80:
- cmd.bandwidth = IWL_TOF_BW_80;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
+ if (cmd_ver == 7)
+ err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+ else
+ err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to set responder bandwidth\n");
+ return err;
}
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
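
Both the FTM initiator and responder now follow the same dispatch pattern: look up the command version the firmware advertises and pick the matching encoder. A compact sketch of the shape, with the version lookup stubbed:

#include <stdio.h>

static int lookup_cmd_ver(void) { return 7; } /* stub: fw-advertised version */

static int set_bw_v1(void) { puts("v1 bandwidth encoding"); return 0; }
static int set_bw_v2(void) { puts("v2 format/bw encoding"); return 0; }

int main(void)
{
	return (lookup_cmd_ver() == 7) ? set_bw_v2() : set_bw_v1();
}
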
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dd685f7eb410..54c094e88474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -353,22 +353,35 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (ret == -ETIMEDOUT)
- iwl_fw_dbg_error_collect(&mvm->fwrt,
- FW_DBG_TRIGGER_ALIVE_TIMEOUT);
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
- else if (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm, "UMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_UMAC_CURRENT_PC));
+ IWL_ERR(mvm, "LMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC1_CURRENT_PC));
+ if (iwl_mvm_is_cdb_supported(mvm))
+ IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC2_CURRENT_PC));
+ } else if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
+ }
+
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+
iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret;
}
@@ -841,9 +854,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return 0;
}
+ if (!mvm->fwrt.ppag_table.enabled) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG not enabled, command not sent.\n");
+ return 0;
+ }
+
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
- IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
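
On an alive-notification failure, the fw.c hunk above now also prints the UMAC/LMAC program counters (via the UREG_*_CURRENT_PC registers added in the iwl-prph.h hunk) before collecting the error dump, so the firmware's last PC is preserved in the log. A sketch with register access stubbed out; the stub return value is obviously not real MMIO data:

#include <stdint.h>
#include <stdio.h>

#define UREG_UMAC_CURRENT_PC  0xa05c18
#define UREG_LMAC1_CURRENT_PC 0xa05c1c

static uint32_t read_umac_prph(uint32_t addr)
{
	return addr ^ 0xdead0000; /* stub value in place of MMIO */
}

int main(void)
{
	printf("UMAC PC: 0x%x\n", read_umac_prph(UREG_UMAC_CURRENT_PC));
	printf("LMAC PC: 0x%x\n", read_umac_prph(UREG_LMAC1_CURRENT_PC));
	return 0;
}
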
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 32dc9d6f0fb6..6717f25c46b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
- __le16_to_cpu(resp->geo_info));
+ __le16_to_cpu(resp->geo_info),
+ __le16_to_cpu(resp->cap));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ if (likely(sta)) {
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+ return;
+ } else {
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+ return;
+ }
+
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
}
- if (sta) {
- if (iwl_mvm_tx_skb(mvm, skb, sta))
- goto drop;
- return;
- }
-
- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
- goto drop;
+ iwl_mvm_tx_skb(mvm, skb, sta);
return;
drop:
ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
break;
}
- if (!txq->sta)
- iwl_mvm_tx_skb_non_sta(mvm, skb);
- else
- iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
return ret;
}
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_HT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ u32 gi_ltf = u32_get_bits(rate_n_flags,
+ RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ } else {
+ switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+ case IWL_RATE_1M_PLCP:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_PLCP:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_PLCP:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_PLCP:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_PLCP:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_PLCP:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_PLCP:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_PLCP:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_PLCP:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_PLCP:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_PLCP:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_PLCP:
+ rinfo->legacy = 540;
+ break;
+ }
+ }
+}
+
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
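
iwl_mvm_set_sta_rate() above decodes the firmware's packed rate_n_flags word: fixed masks select the modulation family, then field extraction (the kernel's u32_get_bits) pulls MCS and NSS. A userspace sketch of the HT branch; the mask layout here is illustrative, not the firmware's.

#include <stdint.h>
#include <stdio.h>

#define RATE_MCS_HT_MSK       (1u << 8)  /* illustrative layout */
#define RATE_HT_MCS_INDEX_MSK 0x07u
#define RATE_HT_MCS_NSS_MSK   0x18u

static uint32_t get_bits(uint32_t v, uint32_t mask)
{
	return (v & mask) / (mask & -mask); /* shift down by mask's low bit */
}

int main(void)
{
	uint32_t rate_n_flags = RATE_MCS_HT_MSK | 0x0du; /* HT, MCS 5, 2 NSS */

	if (rate_n_flags & RATE_MCS_HT_MSK)
		printf("HT: mcs=%u nss=%u\n",
		       get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK),
		       get_bits(rate_n_flags, RATE_HT_MCS_NSS_MSK) + 1);
	return 0;
}
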
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 3ec8de00f3aa..d6ecc2d2bf21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1160,6 +1160,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1170,6 +1171,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
+ IWL_MVM_STATUS_IN_D3,
};
/* Keep track of completed init configuration */
@@ -1298,9 +1300,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
- if (iwlwifi_mod_params.lar_disable)
- return false;
-
/*
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
@@ -1508,8 +1507,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 945c1ea5cda8..70b29bf16bb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -178,7 +178,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else {
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
- ret, mvm->cfg->name);
+ ret, mvm->trans->name);
ret = -ENODATA;
}
goto exit;
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
- bool lar_enabled;
int regulatory_type;
/* Checking for required sections */
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
- lar_enabled = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
- return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
regulatory, mac_override, phy_sku,
- mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
- lar_enabled);
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1b07a8e8f069..dfe02440d474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -830,7 +830,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->cfg->name, mvm->trans->hw_rev);
+ mvm->trans->name, mvm->trans->hw_rev);
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 25d7faea1c62..c146303ec73b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -198,7 +198,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd)
continue;
- if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
@@ -233,15 +233,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window =
- (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
+ cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout_uapsd =
@@ -354,8 +354,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip;
@@ -370,7 +369,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10)
return;
- if (host_awake) {
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
skip = 2;
@@ -390,8 +389,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper, bi;
int keep_alive;
@@ -437,9 +435,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
}
- iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd);
- if (!host_awake) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
cmd->rx_data_timeout =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout =
@@ -512,8 +510,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- iwl_mvm_power_build_cmd(mvm, vif, &cmd,
- mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@@ -536,7 +533,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
mvm->disable_power_off_d3 : mvm->disable_power_off)
cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -943,7 +940,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
if (!mvmvif->bf_data.bf_enabled)
return 0;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ef99c49247b7..c15f7dbc9516 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
- .metadata.sync = 0,
- .nssn_sync.baid = baid,
- .nssn_sync.nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+ if (IWL_MVM_USE_NSSN_SYNC) {
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+ sizeof(notif));
+ }
}
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
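
The rxmq.c hunk above gates the NSSN sync notification behind IWL_MVM_USE_NSSN_SYNC, a constant defined as 0 in constants.h: the block is still type-checked but becomes dead code the compiler drops. A minimal sketch of the compile-time-gate idiom, with the sync call stubbed:

#include <stdio.h>

#define USE_NSSN_SYNC 0 /* mirrors IWL_MVM_USE_NSSN_SYNC */

static void sync_rx_queues(int baid, int nssn)
{
	printf("sync baid=%d nssn=%d\n", baid, nssn);
}

static void maybe_sync(int baid, int nssn)
{
	if (USE_NSSN_SYNC) /* constant-folded away when 0 */
		sync_rx_queues(baid, nssn);
}

int main(void)
{
	maybe_sync(3, 42);
	return 0;
}
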
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a046ac9fa852..3b263c81bcae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
- cmd_size += num_channels;
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
if (!cfg)
@@ -1907,20 +1907,6 @@ iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
}
static void
-iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
- struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif,
- struct iwl_scan_channel_params_v3 *cp)
-{
- cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
- cp->count = params->n_channels;
-
- iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
- params->n_channels, 0,
- cp->channel_config);
-}
-
-static void
iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
@@ -1937,37 +1923,6 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type);
}
-static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
-{
- struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
- int ret;
- u16 gen_flags;
-
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
- cmd->uid = cpu_to_le32(uid);
-
- gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
- &scan_p->general_params,
- gen_flags);
-
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
- if (ret)
- return ret;
-
- iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
- iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
- &scan_p->channel_params);
-
- return 0;
-}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
@@ -2152,7 +2107,6 @@ static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
- IWL_SCAN_UMAC_HANDLER(11),
};
static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
@@ -2511,7 +2465,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dc5c02fbc65a..a8d0d17f79fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(!dev_cmd))
return NULL;
- /* Make sure we zero enough of dev_cmd */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
- memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
- struct iwl_device_cmd *cmd)
+ struct iwl_device_tx_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info info;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
@@ -847,10 +842,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
else if (next)
consume_skb(skb);
- while (next) {
- tmp = next;
- next = tmp->next;
-
+ skb_list_walk_safe(next, tmp, next) {
memcpy(tmp->cb, cb, sizeof(tmp->cb));
/*
* Compute the length of all the data added for the A-MSDU.
@@ -880,9 +872,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
skb_shinfo(tmp)->gso_size = 0;
}
- tmp->prev = NULL;
- tmp->next = NULL;
-
+ skb_mark_not_on_list(tmp);
__skb_queue_tail(mpdus_skb, tmp);
i++;
}
@@ -1078,7 +1068,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
- return 0;
+ return -1;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1196,8 @@ drop:
return -1;
}
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
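
The TSO-segment hunk above replaces the hand-rolled next/prev unlinking with the skb list helpers: skb_list_walk_safe() iterates while the current entry may be unlinked, and skb_mark_not_on_list() clears its forward pointer before queueing. A userspace model of the same traversal, with a plain singly linked node standing in for struct sk_buff:

#include <stdio.h>

struct node { int id; struct node *next; };

/* save the successor before the body runs, like skb_list_walk_safe() */
#define list_walk_safe(head, cur, nxt) \
	for ((cur) = (head); (cur) && ((nxt) = (cur)->next, 1); (cur) = (nxt))

static void mark_not_on_list(struct node *n) { n->next = NULL; }

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *cur, *nxt;

	list_walk_safe(&a, cur, nxt) {
		mark_not_on_list(cur); /* detach before "queueing" */
		printf("queued node %d\n", cur->id);
	}
	return 0;
}
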
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index a4e09a5b1816..01f248ba8fec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+ cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d38cefbb779e..acd01d86f101 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -57,6 +57,42 @@
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -193,11 +232,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
- (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS) |
- (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+ control_flags |=
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
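
The ctxt-info.c hunk above retries DMA allocations that straddle a 4GiB physical boundary: the offending buffer is kept alive while a replacement is allocated (so the allocator cannot hand the same range back), then freed, with recursion depth capped at 2. The boundary predicate itself is simple; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static int crosses_4g_boundary(uint64_t phys, size_t size)
{
	/* start and last byte fall in different 4GiB windows */
	return (phys >> 32) != ((phys + size - 1) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g_boundary(0xFFFFF000ull, 0x2000)); /* 1 */
	printf("%d\n", crosses_4g_boundary(0x100000000ull, 0x2000)); /* 0 */
	return 0;
}
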
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b0b7eca1754e..97f227f3cbc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -565,14 +565,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
+
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -598,21 +592,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -910,7 +895,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
@@ -987,28 +971,74 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name }
+
+static const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLMVM)
+ IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+
+ IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+#endif /* CONFIG_IWLMVM */
+};
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans =
+ (struct iwl_cfg_trans_params *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
+ struct iwl_trans_pcie *trans_pcie;
unsigned long flags;
- int ret;
+ int i, ret;
+ /*
+ * This is needed for backwards compatibility with the old
+ * tables, so we don't need to change all the config structs
+ * at the same time. The cfg is used to compare with the old
+ * full cfg structs.
+ */
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
+ /* make sure trans is the first element in iwl_cfg */
+ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = &cfg->trans;
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- if (WARN_ONCE(!iwl_trans->trans_cfg->csr,
- "CSR addresses aren't configured\n")) {
- ret = -EINVAL;
- goto out_free_trans;
+ /* the trans_cfg should never change, so set it now */
+ iwl_trans->trans_cfg = trans;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if ((dev_info->device == IWL_CFG_ANY ||
+ dev_info->device == pdev->device) &&
+ (dev_info->subdevice == IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device)) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
+ goto found;
+ }
}
#if IS_ENABLED(CONFIG_IWLMVM)
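With the per-subdevice 0x2526 entries collapsed into one PCI_ANY_ID line, the fine-grained selection moves to iwl_dev_info_table, which the loop above scans with IWL_CFG_ANY as a wildcard, first match winning. A minimal userspace sketch of that matching rule - the ANY value and the table contents are illustrative, not driver data:

#include <stdint.h>
#include <stddef.h>

#define ANY 0xFFFF	/* stands in for IWL_CFG_ANY */

struct dev_info {
	uint16_t device, subdevice;
	const char *name;
};

static const struct dev_info table[] = {
	{ 0x2526, 0x0010, "9260 160MHz" },
	{ 0x2526, ANY,    "9260" },	/* wildcard catches the rest */
};

static const struct dev_info *lookup(uint16_t dev, uint16_t subdev)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct dev_info *di = &table[i];

		if ((di->device == ANY || di->device == dev) &&
		    (di->subdevice == ANY || di->subdevice == subdev))
			return di;	/* first match wins, as in probe */
	}
	return NULL;
}

/* lookup(0x2526, 0x0010) -> "9260 160MHz"; lookup(0x2526, 0x1234) -> "9260" */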
@@ -1027,22 +1057,22 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- cfg = &iwlax411_2ax_cfg_so_gf4_a0;
+ iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
@@ -1050,13 +1080,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- cfg = &iwl_ax101_cfg_qu_hr;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwl22000_2ax_cfg_jf;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
@@ -1073,19 +1107,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- /*
- * b step fw is the same for physical card and fpga
- */
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
- } else {
- /*
- * a step no FPGA
- */
- cfg = &iwl22000_2ac_cfg_hr;
- }
+ CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
+ SILICON_A_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
@@ -1096,17 +1122,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
@@ -1126,8 +1156,27 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
- /* now set the real cfg we decided to use */
- iwl_trans->cfg = cfg;
+ /*
+	 * If we didn't set the cfg yet, the driver_data actually
+	 * pointed at a full cfg from the old tables, so use that.
+ */
+ if (!iwl_trans->cfg)
+ iwl_trans->cfg = cfg;
+
+found:
+ /* if we don't have a name yet, copy name from the old cfg */
+ if (!iwl_trans->name)
+ iwl_trans->name = iwl_trans->cfg->name;
+
+ if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
+ trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ } else {
+ trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ }
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 &&
iwl_trans_grab_nic_access(iwl_trans, &flags)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a091690f6c79..72f144c3a46e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @offset: byte offset within the page at which this buffer's
+ *	data starts (when multiple RBs share one page)
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 offset;
};
/**
@@ -305,7 +308,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@@ -491,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
@@ -510,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
@@ -573,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
+ u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -580,6 +590,13 @@ struct iwl_trans_pcie {
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
/*protect hw register */
spinlock_t reg_lock;
@@ -672,6 +689,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32 bits wide - the
+ * hardware always uses 64-bit addresses, so the issue can still occur
+ * in that case; taking 'phys' as u64 forces the addition to be done
+ * in 64 bits.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
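A quick standalone check of the helper's behaviour - upper_32_bits() is re-derived here and the sample addresses are arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* same test as iwl_pcie_crosses_4g_boundary() */
static int crosses_4g(uint64_t phys, uint16_t len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int main(void)
{
	printf("%d\n", crosses_4g(0xFFFFFFF0ULL, 0x20));  /* 1: spans 2^32 */
	printf("%d\n", crosses_4g(0x100000000ULL, 0x20)); /* 0: fully above */
	return 0;
}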
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
@@ -688,7 +715,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1109,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
@@ -1106,7 +1134,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 452da44a21e0..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true;
return;
}
@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
/*
@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list);
list_del(&rxb->list);
rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page;
gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n");
return NULL;
}
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
return page;
}
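The function above now carves several receive buffers out of one page: a partially used page is parked in trans_pcie->alloc_page and each caller takes the next rbsize chunk under alloc_page_lock, re-checking the pointer after taking the lock since another CPU may have consumed it meanwhile. A userspace analogue of the idea, with a mutex standing in for the spinlock and page refcounting omitted; a NULL return means the caller must allocate a fresh page, as the driver does:

#include <pthread.h>
#include <stddef.h>

struct splitter {
	pthread_mutex_t lock;	/* stands in for alloc_page_lock */
	void *page;		/* current partially-used page */
	size_t used;		/* bytes already handed out */
	size_t pagesize, rbsize;
};

static void *take_chunk(struct splitter *s, size_t *offset)
{
	void *page = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->page) {			/* re-check under the lock */
		page = s->page;
		*offset = s->used;
		s->used += s->rbsize;
		if (s->used >= s->pagesize)
			s->page = NULL;	/* page fully consumed */
	}
	pthread_mutex_unlock(&s->lock);
	return page;
}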
@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page;
while (1) {
+ unsigned int offset;
+
spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock);
@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
spin_unlock(&rxq->lock);
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page);
rxb->page = page;
+ rxb->offset = offset;
/* Get physical address of the RB */
rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < RX_POOL_SIZE; i++) {
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL;
@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
spin_lock_init(&rxq->lock);
if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ rxq->queue_size = trans->cfg->num_rbds;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -ENOMEM;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&rba->lock);
@@ -845,6 +890,8 @@ err:
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
return ret;
@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
/* move the pool to the default queue and allocator ownerships */
queue_size = trans->trans_cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
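The sizing arithmetic above reduces to a small helper; this only restates what the hunk computes (queue_size = num_rbds - 1 on multi-queue devices, plus the allocator's per-queue in-flight buffers), with illustrative names:

static unsigned int rx_bufs_needed(unsigned int num_rbds,
				   unsigned int num_queues,
				   unsigned int claim_req,
				   unsigned int post_req)
{
	unsigned int queue_size = num_rbds - 1;	/* mq case */

	return queue_size + num_queues * (claim_req - post_req);
}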
@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
if (WARN_ON(!rxb))
@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim;
int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */
if (rxb->page != NULL) {
rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ vid = le16_to_cpu(rxq->cd[i].rbid);
else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
rxb = trans_pcie->global_table[vid - 1];
@@ -1529,13 +1579,13 @@ out:
napi = &rxq->napi;
if (napi->poll) {
+ napi_gro_flush(napi, false);
+
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
INIT_LIST_HEAD(&napi->rx_list);
napi->rx_count = 0;
}
-
- napi_gro_flush(napi, false);
}
iwl_pcie_rxq_restock(trans, rxq);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0d8b2a8ffa5d..19a2c72081ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -132,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -175,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a0677131634d..38d8fe21690a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -79,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -183,8 +184,7 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
@@ -301,18 +301,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -487,8 +482,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -510,12 +504,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_master_dis),
- BIT(trans->trans_cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -564,8 +557,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -1270,7 +1262,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1494,9 +1486,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (reset) {
/*
@@ -1561,7 +1552,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
@@ -1583,7 +1574,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1945,6 +1936,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
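supported_dma_mask is used as an alignment check rather than a mapping mask: the WARN_ON in iwl_pcie_rxmq_restock() fires if any of those low bits are set in an RB address. A tiny standalone illustration - the DMA_BIT_MASK macro is simplified here (the kernel version special-cases n == 64):

#include <stdint.h>
#include <assert.h>

#define DMA_BIT_MASK(n) ((1ULL << (n)) - 1)

int main(void)
{
	uint64_t rb_dma = 0x12345000ULL;	/* example bus address */

	/* pre-AX210: low 12 bits must be clear (4 KiB-aligned RBs);
	 * AX210 and later check 11 bits, hence DMA_BIT_MASK(11) */
	assert((rb_dma & DMA_BIT_MASK(12)) == 0);
	return 0;
}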
@@ -2054,7 +2050,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2079,8 +2075,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2162,7 +2158,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2963,7 +2959,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
@@ -2989,9 +2985,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
- max_len,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page,
+ rxb->offset, max_len,
+ DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
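The WARN_ON holds because each command size stays below its alignment, so an allocation aligned to txcmd_align can never straddle an alignment chunk - hence never a page, let alone a 2^32 boundary. A back-of-the-envelope check; the 20 and 28 below are assumed approximate struct sizes, not values taken from the headers, and 4 approximates sizeof(struct iwl_cmd_header):

_Static_assert(20 + 4 + 36 < 64,  "gen2 tx cmd fits its 64-byte alignment");
_Static_assert(28 + 4 + 36 < 128, "gen3 tx cmd fits its 128-byte alignment");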
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg_trans->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie);
-
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3482,6 +3493,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8ca0250de99e..86fc00167817 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+	 * Only WARN here so we know about the issue; we can't fail,
+	 * since returning an error would mess up our unmap path - not
+	 * every caller currently checks the return value of this
+	 * function, because it can only fail when there is no more
+	 * space, and callers that know there is enough don't always
+	 * check.
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+	 * Work around a hardware bug. If (as checked by the
+	 * condition above) the TB crosses a 2^32 address
+	 * boundary, then the next TB may be accessed with the
+	 * wrong address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
/*
* Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+	 * we leave some space at the end of it so we can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
skb_walk_frags(skb, frag) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, frag->data,
skb_headlen(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
- tb_phys, skb_headlen(frag));
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
goto out_err;
}
@@ -538,7 +670,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@@ -587,6 +719,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx;
void *tfd;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -603,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1101,9 +1237,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq;
int i;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txq[txq_id];
+
if (WARN_ON(!txq))
return;
@@ -1258,6 +1400,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f21f16ab2a97..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
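The rewritten free path walks a chain: each TSO/workaround page stores a pointer to the previously allocated page in its last sizeof(void *) bytes (see get_workaround_page() in tx-gen2.c and get_page_hdr() further down). A userspace analogue, with malloc/free standing in for page allocation and refcounting:

#include <stdlib.h>
#include <string.h>

#define PGSIZE 4096

/* Prepend a page to the chain rooted at *head; the old head is stored
 * in the new page's last pointer-sized slot. */
static void *chain_alloc(void **head)
{
	void *page = malloc(PGSIZE);

	if (!page)
		return NULL;
	memcpy((char *)page + PGSIZE - sizeof(void *), head, sizeof(void *));
	*head = page;
	return page;
}

/* Walk the chain and free every page, mirroring the while loop above. */
static void chain_free(void *head)
{
	while (head) {
		void *next;

		memcpy(&next, (char *)head + PGSIZE - sizeof(void *),
		       sizeof(void *));
		free(head);
		head = next;
	}
}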
@@ -646,7 +652,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1255,16 +1261,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+	 * Note that we put the page chaining pointer *last* in the
+	 * page - we need it somewhere, and keeping it there means we
+	 * avoid DMA-mapping the last bytes of the page, which could
+	 * trigger the 2^32 boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 0094b1d2b577..3ec46f48cfde 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
sta->supported_rates[0] = 2;
if (sta->tx_supp_rates & WLAN_RATE_2M)
sta->supported_rates[1] = 4;
- if (sta->tx_supp_rates & WLAN_RATE_5M5)
+ if (sta->tx_supp_rates & WLAN_RATE_5M5)
sta->supported_rates[2] = 11;
if (sta->tx_supp_rates & WLAN_RATE_11M)
sta->supported_rates[3] = 22;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 05466281afb6..de97b3304115 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -761,7 +761,7 @@ static void hostap_set_multicast_list(struct net_device *dev)
}
-static void prism2_tx_timeout(struct net_device *dev)
+static void prism2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hostap_interface *iface;
local_info_t *local;
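This and the remaining tx_timeout hunks are the same mechanical change: the ndo_tx_timeout callback gained an unsigned int txqueue argument identifying the stalled queue. A minimal sketch of a conforming handler - my_tx_timeout and my_netdev_ops are hypothetical, shown only to illustrate the new signature:

#include <linux/netdevice.h>

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* txqueue is the new argument: which TX queue stalled */
	netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
	/* a real driver would schedule a reset/recovery here */
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_tx_timeout	= my_tx_timeout,
};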
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 28dac36d7c4c..00264a14e52c 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -647,7 +647,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
netif_wake_queue(dev);
}
-void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &dev->stats;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 430862a6a24b..cdd026af100b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -207,7 +207,7 @@ int orinoco_open(struct net_device *dev);
int orinoco_stop(struct net_device *dev);
void orinoco_set_multicast_list(struct net_device *dev);
int orinoco_change_mtu(struct net_device *dev, int new_mtu);
-void orinoco_tx_timeout(struct net_device *dev);
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue);
/********************************************************************/
/* Locking and synchronization functions */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 40a8b941ad5c..e753f43e0162 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw)
int retval;
BUG_ON(in_interrupt());
- BUG_ON(!upriv);
+ if (!upriv)
+ return -EINVAL;
upriv->reply_count = 0;
/* Write the MAGIC number on the simulated registers to keep
@@ -1608,9 +1609,9 @@ static int ezusb_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* check out the endpoints */
- iface_desc = &interface->altsetting[0].desc;
+ iface_desc = &interface->cur_altsetting->desc;
for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
- ep = &interface->altsetting[0].endpoint[i].desc;
+ ep = &interface->cur_altsetting->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(ep)) {
/* we found a bulk in endpoint */
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c
index 2b8fb07d07e7..8d680250a281 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c
@@ -473,7 +473,7 @@ islpci_do_reset_and_wake(struct work_struct *work)
}
void
-islpci_eth_tx_timeout(struct net_device *ndev)
+islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
islpci_private *priv = netdev_priv(ndev);
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.h b/drivers/net/wireless/intersil/prism54/islpci_eth.h
index 61f4b43c6054..e433ccdc526b 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.h
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.h
@@ -53,7 +53,7 @@ struct avs_80211_1_header {
void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
netdev_tx_t islpci_eth_transmit(struct sk_buff *, struct net_device *);
int islpci_eth_receive(islpci_private *);
-void islpci_eth_tx_timeout(struct net_device *);
+void islpci_eth_tx_timeout(struct net_device *, unsigned int txqueue);
void islpci_do_reset_and_wake(struct work_struct *);
#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 57edfada0665..c9401c121a14 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
int hw, ap, ap_max = ie[1];
u8 hw_rate;
+ if (ap_max > MAX_RATES) {
+ lbs_deb_assoc("invalid rates\n");
+ return tlv;
+ }
/* Advance past IE header */
ie += 2;
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
+ int hw, i;
+ u8 rates_max;
+ u8 *rates;
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
- int hw, i;
- u8 rates_max = rates_eid[1];
- u8 *rates = cmd.bss.rates;
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates");
+ goto out;
+ }
+ rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index d14e55e3c9da..7d94695e7961 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1020,7 +1020,7 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
* CFG802.11 network device handler for transmission timeout.
*/
static void
-mwifiex_tx_timeout(struct net_device *dev)
+mwifiex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 547ff3c578ee..fa5634af40f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1295,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
return pos;
}
-/* This function return interface number with the same bss_type.
- */
-static inline u8
-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
-{
- u8 i, num = 0;
-
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
- num++;
- return num;
-}
-
/*
* This function returns the correct private structure pointer based
* upon the BSS type and BSS number.
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 7caf1d26124a..f8f282ce39bd 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *peer, *pos, *end;
u8 i, action, basic;
u16 cap = 0;
- int ie_len = 0;
+ int ies_len = 0;
if (len < (sizeof(struct ethhdr) + 3))
return;
@@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
pos = buf + sizeof(struct ethhdr) + 4;
/* payload 1+ category 1 + action 1 + dialog 1 */
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
pos += 2;
break;
@@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
/* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
pos = buf + sizeof(struct ethhdr) + 6;
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
pos += 2;
break;
@@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
return;
pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
@@ -947,33 +947,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
- for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
- if (pos + 2 + pos[1] > end)
+ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
+ u8 ie_len = pos[1];
+
+ if (pos + 2 + ie_len > end)
break;
switch (*pos) {
case WLAN_EID_SUPP_RATES:
- if (pos[1] > 32)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
return;
- sta_ptr->tdls_cap.rates_len = pos[1];
- for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates_len = ie_len;
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
- if (pos[1] > 32)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
return;
basic = sta_ptr->tdls_cap.rates_len;
- if (pos[1] > 32 - basic)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
return;
- for (i = 0; i < pos[1]; i++)
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
- sta_ptr->tdls_cap.rates_len += pos[1];
+ sta_ptr->tdls_cap.rates_len += ie_len;
break;
case WLAN_EID_HT_CAPABILITY:
- if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_ht_cap))
+ if (ie_len != sizeof(struct ieee80211_ht_cap))
return;
/* copy the ie's value into ht_capb*/
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
@@ -981,59 +981,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
- if (pos > end -
- sizeof(struct ieee80211_ht_operation) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_ht_operation))
+ if (ie_len != sizeof(struct ieee80211_ht_operation))
return;
/* copy the ie's value into ht_oper*/
memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
- if (pos > end - 3)
- return;
- if (pos[1] != 1)
+ if (ie_len != sizeof(pos[2]))
return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
- if (pos > end - sizeof(struct ieee_types_header))
- return;
- if (pos[1] < sizeof(struct ieee_types_header))
+ if (ie_len < sizeof(struct ieee_types_header))
return;
- if (pos[1] > 8)
+ if (ie_len > 8)
return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], 8));
+ min_t(u8, ie_len, 8));
break;
case WLAN_EID_RSN:
- if (pos > end - sizeof(struct ieee_types_header))
+ if (ie_len < sizeof(struct ieee_types_header))
return;
- if (pos[1] < sizeof(struct ieee_types_header))
- return;
- if (pos[1] > IEEE_MAX_IE_SIZE -
+ if (ie_len > IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header))
return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
+ min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
- if (pos > end - 3)
- return;
- if (pos[1] != 1)
+ if (ie_len != sizeof(pos[2]))
return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end -
- sizeof(struct ieee80211_vht_operation) - 2)
- return;
- if (pos[1] !=
+ if (ie_len !=
sizeof(struct ieee80211_vht_operation))
return;
/* copy the ie's value into vhtoper*/
@@ -1043,10 +1029,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end -
- sizeof(struct ieee80211_vht_cap) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_vht_cap))
+ if (ie_len != sizeof(struct ieee80211_vht_cap))
return;
/* copy the ie's value into vhtcap*/
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
@@ -1056,9 +1039,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
break;
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end - 4)
- return;
- if (pos[1] != 2)
+ if (ie_len != sizeof(u16))
return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
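
The rewritten TDLS loop replaces ad-hoc pointer-versus-end comparisons with one uniform scheme: read the element length once into ie_len, verify the whole element fits in the buffer, then verify it matches the size of the structure it is copied into. A condensed sketch of that scheme for a single element type:

#include <linux/ieee80211.h>
#include <linux/string.h>

static void parse_ies(const u8 *pos, int ies_len,
		      struct ieee80211_ht_cap *ht_capb)
{
	const u8 *end = pos + ies_len;

	for (; pos + 1 < end; pos += 2 + pos[1]) {
		u8 ie_len = pos[1];

		if (pos + 2 + ie_len > end)	/* element is truncated */
			break;

		switch (pos[0]) {
		case WLAN_EID_HT_CAPABILITY:
			if (ie_len != sizeof(*ht_capb)) /* exact size only */
				return;
			memcpy(ht_capb, pos + 2, sizeof(*ht_capb));
			break;
		}
	}
}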
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 53b5a4b2dcc5..59c187898132 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -281,8 +281,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
struct mt76_rx_tid *tid = NULL;
- rcu_swap_protected(wcid->aggr[tidno], tid,
- lockdep_is_held(&dev->mutex));
+ tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
if (tid) {
mt76_rx_aggr_shutdown(dev, tid);
kfree_rcu(tid, rcu_head);
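
rcu_swap_protected() exchanged values through a caller-supplied local; rcu_replace_pointer() installs the new value and hands back the old one directly, which is what this call site actually wants. A self-contained sketch against a hypothetical table/slot layout:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct rcu_head rcu;
};

struct table {
	struct mutex lock;		/* serializes updates to @slot */
	struct item __rcu *slot;
};

static void clear_slot(struct table *t)
{
	struct item *old;

	mutex_lock(&t->lock);
	/* publish NULL and take ownership of the previous entry */
	old = rcu_replace_pointer(t->slot, NULL, lockdep_is_held(&t->lock));
	mutex_unlock(&t->lock);

	if (old)
		kfree_rcu(old, rcu);	/* free after a grace period */
}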
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
index 55116f395f9a..a4a785467748 100644
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ b/drivers/net/wireless/mediatek/mt76/airtime.c
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
return 0;
sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx > sband->n_bitrates)
+ if (!sband || status->rate_idx >= sband->n_bitrates)
return 0;
rate = &sband->bitrates[status->rate_idx];
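
A one-character fix, but the reasoning is worth spelling out: n_bitrates counts entries, so valid indices run from 0 through n_bitrates - 1, and the old '>' comparison let rate_idx == n_bitrates read one element past the end of the array:

/* an index is valid only when strictly below the element count */
if (!sband || status->rate_idx >= sband->n_bitrates)
	return 0;

rate = &sband->bitrates[status->rate_idx];	/* now provably in bounds */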
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b9f2a401041a..96018fd65779 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
- mt76_led_cleanup(dev);
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
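
IS_ENABLED() expands to a compile-time 0 or 1, so the guarded call disappears entirely from LED-less builds while the branch body still gets parsed and type-checked. A minimal sketch; my_unregister is a hypothetical wrapper:

#include <linux/kconfig.h>

static void my_unregister(struct mt76_dev *dev)
{
	/* constant-folded: zero runtime cost when CONFIG_MT76_LEDS=n,
	 * yet the call still compiles, so API drift is caught early */
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
}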
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 59d089e092f9..8849faa5bc10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -1056,7 +1056,8 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
- ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
+ ret = qtnf_cmd_reg_notify(mac, req, qtnf_slave_radar_get(),
+ qtnf_dfs_offload_get());
if (ret) {
pr_err("MAC%u: failed to update region to %c%c: %d\n",
mac->macid, req->alpha2[0], req->alpha2[1], ret);
@@ -1078,7 +1079,8 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
{
struct wiphy *wiphy;
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
@@ -1163,7 +1165,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 548f6ff6d0f2..d0d7ec8794c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -257,6 +257,14 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
cmd->pbss = s->pbss;
cmd->ht_required = s->ht_required;
cmd->vht_required = s->vht_required;
+ cmd->twt_responder = s->twt_responder;
+ if (s->he_obss_pd.enable) {
+ cmd->sr_params.sr_control |= QLINK_SR_SRG_INFORMATION_PRESENT;
+ cmd->sr_params.srg_obss_pd_min_offset =
+ s->he_obss_pd.min_offset;
+ cmd->sr_params.srg_obss_pd_max_offset =
+ s->he_obss_pd.max_offset;
+ }
aen = &cmd->aen;
aen->auth_type = s->auth_type;
@@ -510,6 +518,8 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags |= RATE_INFO_FLAGS_MCS;
else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_HE_MCS)
+ rate_dst->flags |= RATE_INFO_FLAGS_HE_MCS;
if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -2454,7 +2464,7 @@ out:
}
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar)
+ bool slave_radar, bool dfs_offload)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_bus *bus = mac->bus;
@@ -2517,6 +2527,7 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
}
cmd->slave_radar = slave_radar;
+ cmd->dfs_offload = dfs_offload;
cmd->num_channels = 0;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 761755bf9ede..ab273257b078 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -58,7 +58,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar);
+ bool slave_radar, bool dfs_offload);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5fb598389487..4320180f8c07 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -21,8 +21,22 @@ static bool slave_radar = true;
module_param(slave_radar, bool, 0644);
MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
+static bool dfs_offload;
+module_param(dfs_offload, bool, 0644);
+MODULE_PARM_DESC(dfs_offload, "set 1 to enable DFS offload to firmware");
+
static struct dentry *qtnf_debugfs_dir;
+bool qtnf_slave_radar_get(void)
+{
+ return slave_radar;
+}
+
+bool qtnf_dfs_offload_get(void)
+{
+ return dfs_offload;
+}
+
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
@@ -156,7 +170,7 @@ static void qtnf_netdev_get_stats64(struct net_device *ndev,
/* Netdev handler for transmission timeout.
*/
-static void qtnf_netdev_tx_timeout(struct net_device *ndev)
+static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac;
@@ -211,9 +225,6 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
const struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
const struct qtnf_bus *bus = vif->mac->bus;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE))
- return -EOPNOTSUPP;
-
ppid->id_len = sizeof(bus->hw_id);
memcpy(&ppid->id, bus->hw_id, ppid->id_len);
@@ -456,11 +467,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy)
-{
- return slave_radar;
-}
-
static const struct ethtool_ops qtnf_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
};
@@ -656,12 +662,24 @@ bool qtnf_netdev_is_qtn(const struct net_device *ndev)
return ndev->netdev_ops == &qtnf_netdev_ops;
}
+static int qtnf_check_br_ports(struct net_device *dev, void *data)
+{
+ struct net_device *ndev = data;
+
+ if (dev != ndev && netdev_port_same_parent_id(dev, ndev))
+ return -ENOTSUPP;
+
+ return 0;
+}
+
static int qtnf_core_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
const struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
struct qtnf_vif *vif;
+ struct qtnf_bus *bus;
int br_domain;
int ret = 0;
@@ -672,25 +690,34 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
vif = qtnf_netdev_get_priv(ndev);
+ bus = vif->mac->bus;
switch (event) {
case NETDEV_CHANGEUPPER:
info = ptr;
+ brdev = info->upper_dev;
- if (!netif_is_bridge_master(info->upper_dev))
+ if (!netif_is_bridge_master(brdev))
break;
pr_debug("[VIF%u.%u] change bridge: %s %s\n",
- vif->mac->macid, vif->vifid,
- netdev_name(info->upper_dev),
+ vif->mac->macid, vif->vifid, netdev_name(brdev),
info->linking ? "add" : "del");
- if (info->linking)
- br_domain = info->upper_dev->ifindex;
- else
- br_domain = ndev->ifindex;
+ if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ if (info->linking)
+ br_domain = brdev->ifindex;
+ else
+ br_domain = ndev->ifindex;
+
+ ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
+ } else {
+ ret = netdev_walk_all_lower_dev(brdev,
+ qtnf_check_br_ports,
+ ndev);
+ }
- ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
break;
default:
break;
@@ -763,13 +790,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
- bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
- ret = register_netdevice_notifier(&bus->netdev_nb);
- if (ret) {
- pr_err("failed to register netdev notifier: %d\n", ret);
- goto error;
- }
+ bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
+ ret = register_netdevice_notifier(&bus->netdev_nb);
+ if (ret) {
+ pr_err("failed to register netdev notifier: %d\n", ret);
+ goto error;
}
bus->fw_state = QTNF_FW_STATE_RUNNING;
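
With the notifier now registered unconditionally, NETDEV_CHANGEUPPER handling forks: devices with firmware bridge support forward the membership change, everything else walks the bridge's lower devices and vetoes configurations it cannot honour. A condensed sketch of the veto path; my_netdevice_event is hypothetical, qtnf_check_br_ports is the callback defined in the hunk above:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdevice_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	int ret;

	if (event != NETDEV_CHANGEUPPER ||
	    !netif_is_bridge_master(info->upper_dev))
		return NOTIFY_OK;

	/* refuse a bridge that already holds another port of this device */
	ret = netdev_walk_all_lower_dev(info->upper_dev,
					qtnf_check_br_ports, ndev);

	return notifier_from_errno(ret);
}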
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 116ec16aa15b..d715e1cd0006 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -133,7 +133,8 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy);
+bool qtnf_slave_radar_get(void);
+bool qtnf_dfs_offload_get(void);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 75527f1bb306..b2edb03819d1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 15
+#define QLINK_PROTO_VER 16
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -196,6 +196,45 @@ struct qlink_sta_info_state {
__le32 value;
} __packed;
+/**
+ * enum qlink_sr_ctrl_flags - control flags for spatial reuse parameter set
+ *
+ * @QLINK_SR_PSR_DISALLOWED: indicates whether or not PSR-based spatial reuse
+ * transmissions are allowed for STAs associated with the AP
+ * @QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED: indicates whether or not
+ * Non-SRG OBSS PD spatial reuse transmissions are allowed for STAs associated
+ * with the AP
+ * @QLINK_SR_NON_SRG_OFFSET_PRESENT: indicates whether or not Non-SRG OBSS PD Max offset

+ * field is valid in the element
+ * @QLINK_SR_SRG_INFORMATION_PRESENT: indicates whether or not SRG OBSS PD
+ * Min/Max offset fields are valid in the element
+ */
+enum qlink_sr_ctrl_flags {
+ QLINK_SR_PSR_DISALLOWED = BIT(0),
+ QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED = BIT(1),
+ QLINK_SR_NON_SRG_OFFSET_PRESENT = BIT(2),
+ QLINK_SR_SRG_INFORMATION_PRESENT = BIT(3),
+};
+
+/**
+ * struct qlink_sr_params - spatial reuse parameters
+ *
+ * @sr_control: spatial reuse control field; flags contained in this field are
+ * defined in @qlink_sr_ctrl_flags
+ * @non_srg_obss_pd_max: added to -82 dBm to generate the value of the
+ * Non-SRG OBSS PD Max parameter
+ * @srg_obss_pd_min_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Min parameter
+ * @srg_obss_pd_max_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Max parameter
+ */
+struct qlink_sr_params {
+ u8 sr_control;
+ u8 non_srg_obss_pd_max;
+ u8 srg_obss_pd_min_offset;
+ u8 srg_obss_pd_max_offset;
+} __packed;
+
/* QLINK Command messages related definitions
*/
@@ -596,8 +635,9 @@ enum qlink_user_reg_hint_type {
* of &enum qlink_user_reg_hint_type.
* @num_channels: number of &struct qlink_tlv_channel in a variable portion of a
* payload.
- * @slave_radar: whether slave device should enable radar detection.
* @dfs_region: one of &enum qlink_dfs_regions.
+ * @slave_radar: whether slave device should enable radar detection.
+ * @dfs_offload: enable or disable DFS offload to firmware.
* @info: variable portion of regulatory notifier callback.
*/
struct qlink_cmd_reg_notify {
@@ -608,7 +648,7 @@ struct qlink_cmd_reg_notify {
u8 num_channels;
u8 dfs_region;
u8 slave_radar;
- u8 rsvd[1];
+ u8 dfs_offload;
u8 info[0];
} __packed;
@@ -650,6 +690,8 @@ enum qlink_hidden_ssid {
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @aen: encryption info
+ * @sr_params: spatial reuse parameters
+ * @twt_responder: enable Target Wake Time
* @info: variable configurations
*/
struct qlink_cmd_start_ap {
@@ -665,6 +707,9 @@ struct qlink_cmd_start_ap {
u8 ht_required;
u8 vht_required;
struct qlink_auth_encr aen;
+ struct qlink_sr_params sr_params;
+ u8 twt_responder;
+ u8 rsvd[3];
u8 info[0];
} __packed;
@@ -948,6 +993,7 @@ enum qlink_sta_info_rate_flags {
QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+ QLINK_STA_INFO_RATE_FLAG_HE_MCS = BIT(4),
};
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a36c3fea7495..6beac1f74e7c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1652,20 +1652,17 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, offset, reg);
}
- offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+ return;
- if (crypto->cmd == SET_KEY) {
- rt2800_register_multiread(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
- if ((crypto->cipher == CIPHER_TKIP) ||
- (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
- (crypto->cipher == CIPHER_AES))
- iveiv_entry.iv[3] |= 0x20;
- iveiv_entry.iv[3] |= key->keyidx << 6;
- } else {
- memset(&iveiv_entry, 0, sizeof(iveiv_entry));
- }
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index a23c26574002..3868c07672bd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -311,6 +311,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800pci_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7b931bb96a9e..bbfe1425c0ee 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -156,6 +156,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800soc_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 0dfb55c69b73..4cc64fe481a7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -654,6 +654,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800usb_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index a90a518b40d3..ea8a34ecae14 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1439,6 +1439,8 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
+void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index c3eab767bc21..7f9e43a4f805 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1255,16 +1255,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
int retval = 0;
- if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
- /*
- * This is special case for ieee80211_restart_hw(), otherwise
- * mac80211 never call start() two times in row without stop();
- */
- set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
- rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
- rt2x00lib_stop(rt2x00dev);
- }
-
/*
* If this is the first interface which is added,
* we should load the firmware now.
@@ -1292,7 +1282,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
out:
- clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
return retval;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index beb20c5faf5f..32efbc8e9f92 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -165,6 +165,15 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
+ if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
+ /*
+ * This is special case for ieee80211_restart_hw(), otherwise
+ * mac80211 never call start() two times in row without stop();
+ */
+ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+ rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
+ rt2x00lib_stop(rt2x00dev);
+ }
return rt2x00lib_start(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
@@ -180,6 +189,17 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
+void
+rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_reconfig_complete);
+
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
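
Taken together, the rt2x00 hunks give the restart window explicit boundaries: start() detects the ieee80211_restart_hw() re-entry and raises DEVICE_STATE_RESET, and the new reconfig_complete hook clears it only after mac80211 reports the restart finished, instead of clearing it on the way out of rt2x00lib_start(). The two ends of the window, as moved above:

/* rt2x00mac_start(): open the reset window on restart re-entry */
if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
	set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
	rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
	rt2x00lib_stop(rt2x00dev);
}

/* rt2x00mac_reconfig_complete(): close it once mac80211 is done */
if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
	clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);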
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index bc2dfef0de22..92e9e023c349 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
- rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
+ rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
rt2x00queue_stop_queue(queue);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index aa2bb2ae9809..54a1a4ea107b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6384,7 +6384,7 @@ static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
u8 dir, xtype, num;
int ret = 0;
- host_interface = &interface->altsetting[0];
+ host_interface = interface->cur_altsetting;
interface_desc = &host_interface->desc;
endpoints = interface_desc->bNumEndpoints;
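
altsetting[] is simply the array of all alternate settings in descriptor order; the setting currently in force is tracked separately, so endpoint enumeration must start from cur_altsetting. A sketch of the corrected lookup:

#include <linux/usb.h>

static unsigned int active_endpoint_count(struct usb_interface *intf)
{
	/* the live setting, not merely the first one listed */
	struct usb_host_interface *alt = intf->cur_altsetting;

	return alt->desc.bNumEndpoints;
}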
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index e4a7e074ae3f..fa92e29fffda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -61,9 +61,9 @@ enum ap_peer {
CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 8), __val, BIT(19))
#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 24), __val, GENMASK(11, 0))
int rtl_init_core(struct ieee80211_hw *hw);
void rtl_deinit_core(struct ieee80211_hw *hw);
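
le32p_replace_bits() performs an endian-aware read-modify-write of one field inside a little-endian word, and the GENMASK()/BIT() argument encodes both position and width, replacing the old macro's separate (offset, length) pair. A sketch over a descriptor treated as raw bytes:

#include <linux/bitfield.h>

static void set_sw_define(u8 *txdesc, u32 val)
{
	/* bits 11..0 of the little-endian dword at byte offset 24 */
	le32p_replace_bits((__le32 *)(txdesc + 24), val, GENMASK(11, 0));
}

static void set_spe_rpt(u8 *txdesc, u32 val)
{
	/* a single flag: bit 19 of the dword at byte offset 8 */
	le32p_replace_bits((__le32 *)(txdesc + 8), val, BIT(19));
}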
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3c96c320236c..658ff425c256 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -862,7 +862,7 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
/* Resume RF Rx LPF corner
* After initialized, we can use coex_dm->btRf0x1eBackup
*/
- if (btcoexist->initilized) {
+ if (btcoexist->initialized) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 191dafd03189..a4940a3842de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1461,7 +1461,7 @@ void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
ex_btc8192e2ant_init_coex_dm(btcoexist);
}
- btcoexist->initilized = true;
+ btcoexist->initialized = true;
}
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 8c0a7fdbf200..a96a995dd850 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -679,7 +679,7 @@ struct btc_coexist {
bool auto_report_2ant;
bool dbg_mode_1ant;
bool dbg_mode_2ant;
- bool initilized;
+ bool initialized;
bool stop_coex_dm;
bool manual_control;
struct btc_statistics statistics;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index f88d26535978..25335bd2873b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1061,13 +1061,15 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_irq_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
- (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ _rtl_pci_irq_tasklet,
(unsigned long)hw);
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
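
Casting a void (*)(struct ieee80211_hw *) to the tasklet's void (*)(unsigned long) type makes every invocation an indirect call through a mismatched function type, which is undefined behaviour and trips control-flow-integrity checking. The fix is what the hunk shows: keep the canonical signature and cast the data word back inside the handler:

#include <linux/interrupt.h>

/* canonical tasklet callback: context arrives as an unsigned long */
static void my_irq_tasklet(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;

	_rtl_pci_tx_chk_waitq(hw);	/* unchanged body */
}

/* registration then needs no function-pointer cast at all:
 *	tasklet_init(&works.irq_tasklet, my_irq_tasklet, (unsigned long)hw);
 */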
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index e5e1ec5a41dc..bc0ac96ee615 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -737,7 +737,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
find_p2p_ie = true;
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*((__le16 *)&ie[1]));
if (ie + 3 + ie[1] > end)
return;
@@ -766,16 +766,16 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
}
@@ -832,7 +832,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*(__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -861,16 +861,16 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)ie + index);
index += 4;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index e2e0bfbc24fe..fc7b9ad7e5d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -372,20 +372,20 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 rlbm, power_state = 0;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ set_h2ccmd_pwrmode_parm_mode(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM=2.*/
- SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_rlbm(u1_h2c_set_pwrmode, rlbm);
+ set_h2ccmd_pwrmode_parm_smart_ps(u1_h2c_set_pwrmode,
(rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
- SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_awake_interval(u1_h2c_set_pwrmode,
ppsc->reg_max_lps_awakeintvl);
- SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ set_h2ccmd_pwrmode_parm_all_queue_uapsd(u1_h2c_set_pwrmode, 0);
if (mode == FW_PS_ACTIVE_MODE)
power_state |= FW_PWR_STATE_ACTIVE;
else
power_state |= FW_PWR_STATE_RF_OFF;
- SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ set_h2ccmd_pwrmode_parm_pwr_state(u1_h2c_set_pwrmode, power_state);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
index 39ddb7afea9d..79f095e47d71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
@@ -169,82 +169,55 @@ enum rtl8188e_h2c_cmd {
SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+static inline void set_h2ccmd_pwrmode_parm_mode(u8 *__ph2ccmd, u8 __val)
+{
+ *(u8 *)(__ph2ccmd) = __val;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_rlbm(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_smart_ps(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_awake_interval(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 2) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_all_queue_uapsd(u8 *__cmd,
+ u8 __value)
+{
+ *(u8 *)(__cmd + 3) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_pwr_state(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 4) = __value;
+}
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val;
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value;
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value;
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value;
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-
-/* Keep Alive Control*/
-#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/*REMOTE_WAKE_CTRL */
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
-#else
-#define SET_88E_H2_REM_WAKE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#endif
-
-/* GTK_OFFLOAD */
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/* AOAC_RSVDPAGE_LOC */
-#define SET_88E_H2CCMD_AOAC_RSVD_LOC_REM_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value;
int rtl88e_download_fw(struct ieee80211_hw *hw,
bool buse_wake_on_wlan_fw);
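
The fw.h conversion above follows one rule per field shape: an (offset, 0, 8) field is a whole byte and becomes a plain store, while genuine sub-byte fields keep a GENMASK() via u8p_replace_bits(). Inline functions also type-check their arguments, which the token-pasting macros never did. A sketch of both shapes with illustrative names:

#include <linux/bitfield.h>
#include <linux/types.h>

static inline void set_pwrmode_rlbm(u8 *cmd, u8 value)
{
	u8p_replace_bits(cmd + 1, value, GENMASK(3, 0));	/* low nibble */
}

static inline void set_pwrmode_pwr_state(u8 *cmd, u8 value)
{
	cmd[4] = value;			/* full byte, no masking required */
}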
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index f92e95f5494f..70716631de85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -2486,13 +2486,12 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2502,11 +2501,11 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 5ca900f97d66..d13983ec09ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -264,7 +264,7 @@ static bool _rtl88e_check_condition(struct ieee80211_hw *hw,
u32 _board = rtlefuse->board_type; /*need efuse define*/
u32 _interface = rtlhal->interface;
u32 _platform = 0x08;/*SupportPlatform */
- u32 cond = condition;
+ u32 cond;
if (condition == 0xCDCDCDCD)
return true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index a0eda51e833c..4865639ac9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -59,7 +58,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -173,7 +172,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return err;
}
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -189,7 +188,7 @@ void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl88e_get_btc_status(void)
+static bool rtl88e_get_btc_status(void)
{
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
deleted file mode 100644
index 1407151503f8..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2013 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl88e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 4bef237f488d..06fc9b5cdd8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -8,11 +8,11 @@
#include "../base.h"
#include "../core.h"
-#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
-#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
-#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
-#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
-#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_RSSI_STATE_NORMAL_POWER BIT(0)
+#define BT_RSSI_STATE_AMDPU_OFF BIT(1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT(2)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT(3)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT(4)
#define BT_MASK 0x00ffffff
#define RTLPRIV (struct rtl_priv *)
@@ -1515,7 +1515,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
polling == 0xffffffff && bt_state == 0xff)
return false;
- bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
+ bt_state &= BIT(0);
if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
rtlpriv->btcoexist.bt_cur_state = bt_state;
@@ -1524,8 +1524,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
- BIT_OFFSET_LEN_MASK_32(2, 1);
+ 0 : BIT(1)) | BIT(2);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
}
return true;
@@ -1555,9 +1554,9 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
rtlpriv->btcoexist.bt_service = cur_service_type;
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
/* Add interrupt migration when bt is not in
* idle state (no traffic). */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
index 888d9fc94bc2..706fc753dfe6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
@@ -46,19 +46,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
int rtl92c_download_fw(struct ieee80211_hw *hw);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index a52dd64d528d..6402a9e09be7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -2299,13 +2299,12 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2315,11 +2314,11 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 900788e4018c..ed68c850f9a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -14,7 +14,6 @@
#include "../rtl8192c/phy_common.h"
#include "hw.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
{
int err;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -161,7 +160,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
deleted file mode 100644
index f2d121a60159..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_init_var_map(struct ieee80211_hw *hw);
-bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index fc9a3aae047f..8fc3cb824066 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -23,45 +23,6 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
-
- return retsig;
-}
-
static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *pdesc,
@@ -194,7 +155,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
/* Translate DBM to percentage. */
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
/* Get Rx snr value in DB */
rtlpriv->stats.rx_snr_db[i] =
@@ -209,7 +170,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -241,11 +202,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (is_cck_rate)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
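
The two helpers deleted here, and again in the 8192cu copy below, were byte-for-byte duplicates across rtlwifi variants; callers now use the common rtl_query_rxpwrpercentage()/rtl_signal_scale_mapping(). For reference, the dBm-to-percent clamp the shared helper presumably preserves, per the removed body:

u8 rtl_query_rxpwrpercentage(s8 antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;		/* outside the meaningful range */
	if (antpower >= 0)
		return 100;
	return 100 + antpower;		/* -99..-1 dBm maps to 1..99 % */
}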
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index cec19b32c7e2..b4b67341dc83 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -567,44 +567,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
/*==============================================================*/
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *p_desc,
@@ -678,7 +640,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
@@ -707,7 +669,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -716,7 +678,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -739,11 +701,10 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
}
if (is_cck_rate)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index ab3e4aebad39..b53daf1b29f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -12,7 +12,6 @@
#include "mac.h"
#include "dm.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "hw.h"
@@ -252,45 +251,45 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
-#define USB_VENDER_ID_REALTEK 0x0bda
+#define USB_VENDOR_ID_REALTEK 0x0bda
/* 2010-10-19 DID_USB_V3.4 */
static const struct usb_device_id rtl8192c_usb_ids[] = {
/*=== Realtek demoboard ===*/
/* Default ID */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
/* RTL8188CTV */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle, (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8188cu Slim Solo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
/* 8188cu Slim Combo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
/* 8188RU High-power USB Dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
/*=== Customer ID ===*/
/****** 8188CU ********/
@@ -329,7 +328,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
/****** 8188 RU ********/
/* Netcore */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
/****** 8188CUS Slim Solo********/
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
deleted file mode 100644
index 662440c4cdc9..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CU_SW_H__
-#define __RTL92CU_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *powerlevel);
-void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
-void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
-bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
-u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr, u32 bitmask);
-void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index bd8ea6d66dff..7f0a17c1a9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -16,73 +16,26 @@
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
-/* Define a macro that takes an le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
/* Firmware Header (8-byte alignment required) */
/* --- LONG WORD 0 ---- */
#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 0, 16)
-#define GET_FIRMWARE_HDR_CATEGORY(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 16, 8)
-#define GET_FIRMWARE_HDR_FUNCTION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 24, 8)
+ le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 0, 16)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0))
#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 16, 8)
-#define GET_FIRMWARE_HDR_RSVD1(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 24, 8)
-
-/* --- LONG WORD 1 ---- */
-#define GET_FIRMWARE_HDR_MONTH(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 0, 8)
-#define GET_FIRMWARE_HDR_DATE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 8, 8)
-#define GET_FIRMWARE_HDR_HOUR(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 16, 8)
-#define GET_FIRMWARE_HDR_MINUTE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 24, 8)
-#define GET_FIRMWARE_HDR_ROMCODE_SIZE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 0, 16)
-#define GET_FIRMWARE_HDR_RSVD2(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 16, 16)
-
-/* --- LONG WORD 2 ---- */
-#define GET_FIRMWARE_HDR_SVN_IDX(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 16, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD3(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 20, 0, 32)
-
-/* --- LONG WORD 3 ---- */
-#define GET_FIRMWARE_HDR_RSVD4(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 24, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD5(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 28, 0, 32)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16))
#define pagenum_128(_len) \
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = (__val)
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = (__val)
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
+ *(u8 *)((__ph2ccmd) + 1) = (__val)
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
+ *(u8 *)((__ph2ccmd) + 2) = (__val)
int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
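
The fw.h hunk above swaps the driver-local SHIFT_AND_MASK_LE macro for the kernel's le32_get_bits()/GENMASK() helpers; a shift of 0 with a 16-bit mask becomes GENMASK(15, 0). A toy userspace model of that field extraction, assuming a little-endian host (GENMASK32 and le32_get_bits_model are illustrative stand-ins, not the kernel APIs):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GENMASK(h, l): a contiguous mask covering bits l..h */
#define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

/* Stand-in for le32_get_bits(): mask out a field and shift it down,
 * exactly what SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) did */
static uint32_t le32_get_bits_model(uint32_t word, uint32_t mask)
{
	uint32_t v = word & mask;

	while (!(mask & 1)) {	/* move the field down to bit 0 */
		mask >>= 1;
		v >>= 1;
	}
	return v;
}

int main(void)
{
	uint32_t word0 = 0x001092d0;	/* hypothetical header word 0 */

	/* old: SHIFT_AND_MASK_LE(hdr, 0, 16); new: GENMASK(15, 0) */
	printf("signature: 0x%04x\n",
	       (unsigned int)le32_get_bits_model(word0, GENMASK32(15, 0)));
	return 0;
}
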
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 92c9fb45f800..ab5b05ef168e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -23,16 +23,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92d_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
u8 signal_strength_index)
{
@@ -43,33 +33,6 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
return signal_power;
}
-static long _rtl92de_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92d *pdesc,
@@ -141,7 +104,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
/* CCK gain is smaller than OFDM/MCS gain, */
/* so we add a gain difference based on experience; the value is 6 */
pwdb_all += 6;
@@ -183,7 +146,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
- 110;
- rssi = _rtl92d_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -191,7 +154,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -212,10 +175,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
}
}
if (is_cck_rate)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
pwdb_all));
else if (rf_rx_num != 0)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
total_rssi /= rf_rx_num));
}
@@ -438,49 +401,50 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
struct rx_fwinfo_92d *p_drvinfo;
- struct rx_desc_92d *pdesc = (struct rx_desc_92d *)p_desc;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ u32 phystatus = get_rx_desc_physt(pdesc);
- stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ stats->length = (u16)get_rx_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(pdesc);
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (GET_RX_DESC_CRC32(pdesc))
+ if (get_rx_desc_crc32(pdesc))
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!GET_RX_DESC_SWDEC(pdesc))
+ if (!get_rx_desc_swdec(pdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (GET_RX_DESC_BW(pdesc))
+ if (get_rx_desc_bw(pdesc))
rx_status->bw = RATE_INFO_BW_40;
- if (GET_RX_DESC_RXHT(pdesc))
+ if (get_rx_desc_rxht(pdesc))
rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
false, stats->rate);
- rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ rx_status->mactime = get_rx_desc_tsfl(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
stats->rx_bufshift);
_rtl92de_translate_rx_signal_stuff(hw,
- skb, stats, pdesc,
+ skb, stats,
+ (struct rx_desc_92d *)pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
@@ -489,21 +453,23 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+ u8 *virtualaddress8)
{
+ __le32 *virtualaddress = (__le32 *)virtualaddress8;
+
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
- SET_EARLYMODE_LEN0(virtualaddress, ptcb_desc->empkt_len[0]);
- SET_EARLYMODE_LEN1(virtualaddress, ptcb_desc->empkt_len[1]);
- SET_EARLYMODE_LEN2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
- SET_EARLYMODE_LEN3(virtualaddress, ptcb_desc->empkt_len[3]);
- SET_EARLYMODE_LEN4(virtualaddress, ptcb_desc->empkt_len[4]);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_len0(virtualaddress, ptcb_desc->empkt_len[0]);
+ set_earlymode_len1(virtualaddress, ptcb_desc->empkt_len[1]);
+ set_earlymode_len2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
+ set_earlymode_len2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
+ set_earlymode_len3(virtualaddress, ptcb_desc->empkt_len[3]);
+ set_earlymode_len4(virtualaddress, ptcb_desc->empkt_len[4]);
}
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -514,7 +480,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -549,15 +515,15 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92d));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
@@ -567,60 +533,61 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
(u8 *)(skb->data));
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
/* 5G has no CCK rates */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->hw_rate < DESC_RATE6M)
ptcb_desc->hw_rate = DESC_RATE6M;
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
ptcb_desc->hw_rate == DESC_RATEMCS7)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc,
+ ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_hw_rts_enable(pdesc, ((ptcb_desc->rts_enable
|| ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G has no CCK rates */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->rts_rate < DESC_RATE6M)
ptcb_desc->rts_rate = DESC_RATE6M;
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
@@ -630,66 +597,66 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_PKT_ID(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_pkt_id(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
/* Set TxRate and RTSRate in TxDesc */
/* This prevents the Tx initial rate of newly arriving packets */
/* from being overwritten by a retried packet's rate. */
if (!ptcb_desc->use_driver_rate) {
- SET_TX_DESC_RTS_RATE(pdesc, 0x08);
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ set_tx_desc_rts_rate(pdesc, 0x08);
+ /* set_tx_desc_tx_rate(pdesc, 0x0b); */
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
}
if (ieee80211_is_data_qos(fc))
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -701,61 +668,64 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
skb->data, skb->len, PCI_DMA_TODEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
/* 5G has no CCK rates
* Caution: The macros below are multi-line expansions.
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
}
- SET_TX_DESC_SEQ(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_seq(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_offset(pdesc, 0x20);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -766,16 +736,16 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -786,17 +756,18 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name)
+ u8 *p_desc8, bool istx, u8 desc_name)
{
+ __le32 *p_desc = (__le32 *)p_desc8;
u32 ret = 0;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(p_desc);
+ ret = get_tx_desc_own(p_desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ ret = get_tx_desc_tx_buffer_address(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -806,13 +777,13 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(p_desc);
+ ret = get_rx_desc_own(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(p_desc);
- break;
+ ret = get_rx_desc_pkt_len(p_desc);
+ break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(p_desc);
+ ret = get_rx_desc_buff_addr(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
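
The trx.c conversion above follows a single pattern: descriptor pointers change from u8 * to __le32 *, and each SET_TX_DESC_*()/GET_TX_DESC_*() macro becomes an inline wrapper around le32p_replace_bits()/le32_get_bits() (see the trx.h diff below). A toy model of the read-modify-write those setters perform, assuming a little-endian host; le32p_replace_bits_model is a stand-in, and the two masks are copied from the new inlines:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for le32p_replace_bits(): clear the field, then OR in the
 * new value shifted to the field's position */
static void le32p_replace_bits_model(uint32_t *p, uint32_t val, uint32_t mask)
{
	uint32_t shift = 0, m = mask;

	while (!(m & 1)) {	/* locate the field's lowest bit */
		m >>= 1;
		shift++;
	}
	*p = (*p & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t desc[16] = { 0 };	/* stand-in for a TX descriptor */

	/* set_tx_desc_own(desc, 1): BIT(31) of word 0 */
	le32p_replace_bits_model(&desc[0], 1, 1u << 31);
	/* set_tx_desc_queue_sel(desc, 7): GENMASK(12, 8) of word 1 */
	le32p_replace_bits_model(&desc[1], 7, 0x1f00);

	printf("word0=0x%08x word1=0x%08x\n",
	       (unsigned int)desc[0], (unsigned int)desc[1]);
	return 0;
}
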
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 635989e15282..d01578875cd5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -14,514 +14,359 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 26, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
-#define GET_TX_DESC_PKT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
-#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
-#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
-#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
-#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_RX_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
-#define GET_RX_DESC_HWRSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
-#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
-#define GET_RX_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
-#define GET_RX_DESC_RSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
-#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
-#define GET_RX_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- memset((void *)__pdesc, 0, \
- min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(27));
+}
+
+static inline u32 get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
+{
+ memset((void *)__pdesc, 0,
+ min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
+}
/* For 92D early mode */
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 0, 3, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 20, 12, __value)
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
struct rx_fwinfo_92d {
u8 gain_trsw[4];
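
One detail worth calling out in the early-mode helpers above: empkt_len[2] straddles a 32-bit word boundary, so it is written in two parts -- the low nibble via set_earlymode_len2_1() (GENMASK(31, 28) of word 0) and the high byte via set_earlymode_len2_2() (GENMASK(7, 0) of word 1), matching the & 0xF / >> 4 split in _rtl92de_insert_emcontent(). A sketch of that packing with the same toy helper as above (the length value is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* same stand-in for le32p_replace_bits() as in the previous sketch */
static void replace_bits(uint32_t *p, uint32_t val, uint32_t mask)
{
	uint32_t shift = 0, m = mask;

	while (!(m & 1)) {
		m >>= 1;
		shift++;
	}
	*p = (*p & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t em[2];
	uint32_t len2 = 0x234;	/* hypothetical early-mode packet length */

	memset(em, 0, sizeof(em));
	replace_bits(&em[0], len2 & 0xF, 0xF0000000u);	/* len2_1 */
	replace_bits(&em[1], len2 >> 4, 0x000000FFu);	/* len2_2 */

	/* expect em[0]=0x40000000, em[1]=0x00000023 */
	printf("em[0]=0x%08x em[1]=0x%08x\n",
	       (unsigned int)em[0], (unsigned int)em[1]);
	return 0;
}
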
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 648f9108ed4b..551aa86825ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -12,124 +12,6 @@
#include "fw.h"
#include "trx.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
- 0x1b00006c, /* 27. -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c, /* 37, -12.5dB */
- 0x0e400039, /* 38, -13.0dB */
- 0x0d800036, /* 39, -13.5dB */
- 0x0cc00033, /* 40, -14.0dB */
- 0x0c000030, /* 41, -14.5dB */
- 0x0b40002d, /* 42, -15.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index cbfecea232a3..27ac4f8b9a9b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -111,44 +111,40 @@ enum rtl8192e_h2c_cmd {
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __val)
+ *(u8 *)(__cmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __val)
+ *(u8 *)(__cmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __val)
+ *(u8 *)(__cmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __val)
-#define GET_92E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+ *(u8 *)(__cmd + 5) = __val
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(1))
#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __val)
+ *(u8 *)(__cmd + 1) = __val
#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __val)
+ *(u8 *)(__cmd + 2) = __val
int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
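In fw.h the same recipe splits in two: for a whole-byte field, the old SET_BITS_TO_LE_1BYTE(p, 0, 8, v) read-modify-write degenerates to a plain store, since an 8-bit mask preserves nothing and a single octet has no byte order; only the sub-byte fields still need u8p_replace_bits(). A sketch of how the converted setters compose on one command buffer (buffer length and values are illustrative only):

	u8 h2c[8] = {};

	SET_H2CCMD_PWRMODE_PARM_MODE(h2c, 0x01);     /* whole byte 0 */
	SET_H2CCMD_PWRMODE_PARM_RLBM(h2c, 0x2);      /* bits 3..0 of byte 1 */
	SET_H2CCMD_PWRMODE_PARM_SMART_PS(h2c, 0x1);  /* bits 7..4 of byte 1; RLBM kept */
	/* h2c[1] == 0x12 afterwards */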
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index b6ee7dae5943..b337d599b6f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "fw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -164,7 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -175,7 +174,7 @@ void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl92ee_get_btc_status(void)
+static bool rtl92ee_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
deleted file mode 100644
index 36e29a2da0fd..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL92E_SW_H__
-#define __RTL92E_SW_H__
-
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl92ee_get_btc_status(void);
-
-#endif
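The header can be deleted because the three functions it declared are now referenced only through the file-local ops tables in sw.c, so they need no external linkage. A sketch of that wiring, assuming it matches the other rtlwifi chip drivers (callbacks besides these three elided):

	static struct rtl_hal_ops rtl8192ee_hal_ops = {
		.init_sw_vars = rtl92ee_init_sw_vars,
		.deinit_sw_vars = rtl92ee_deinit_sw_vars,
		.get_btc_status = rtl92ee_get_btc_status,
		/* ... */
	};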
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 1c7ee569f4bf..7a54497b7df2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -11,7 +11,6 @@
#include "dm.h"
#include "fw.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
deleted file mode 100644
index a31efba0e6b5..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __REALTEK_PCI92SE_SW_H__
-#define __REALTEK_PCI92SE_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-int rtl92se_init_sw(struct ieee80211_hw *hw);
-void rtl92se_deinit_sw(struct ieee80211_hw *hw);
-void rtl92se_init_var_map(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index d8260c7afe09..c61a92df9d73 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -13,118 +13,6 @@
#include "fw.h"
#include "hal_btc.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe,
- 0x788001e2,
- 0x71c001c7,
- 0x6b8001ae,
- 0x65400195,
- 0x5fc0017f,
- 0x5a400169,
- 0x55400155,
- 0x50800142,
- 0x4c000130,
- 0x47c0011f,
- 0x43c0010f,
- 0x40000100,
- 0x3c8000f2,
- 0x390000e4,
- 0x35c000d7,
- 0x32c000cb,
- 0x300000c0,
- 0x2d4000b5,
- 0x2ac000ab,
- 0x288000a2,
- 0x26000098,
- 0x24000090,
- 0x22000088,
- 0x20000080,
- 0x1e400079,
- 0x1c800072,
- 0x1b00006c,
- 0x19800066,
- 0x18000060,
- 0x16c0005b,
- 0x15800056,
- 0x14400051,
- 0x1300004c,
- 0x12000048,
- 0x11000044,
- 0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
index 5c843736de8d..3f9ed9b4428e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
@@ -18,19 +18,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 5702ac6deebf..ea86d5bf33d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -11,7 +11,6 @@
#include "fw.h"
#include "../rtl8723com/fw_common.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -67,7 +66,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -166,7 +165,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -177,7 +176,7 @@ void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723e_get_btc_status(void)
+static bool rtl8723e_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
deleted file mode 100644
index 200ce39a0dd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL8723E_SW_H__
-#define __RTL8723E_SW_H__
-
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 97ea77464139..7c5e5e916274 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -83,37 +83,35 @@ enum rtl8723b_h2c_cmd {
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 5, 0, 8, __val)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
- LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+ *(u8 *)(__ph2ccmd + 5) = __val
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(1))
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 3c8528f0ecb3..36209ac5b208 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -13,7 +13,6 @@
#include "hw.h"
#include "fw.h"
#include "../rtl8723com/fw_common.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -64,7 +63,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -170,7 +169,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -181,7 +180,7 @@ void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723be_get_btc_status(void)
+static bool rtl8723be_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
deleted file mode 100644
index 6ecacf9fbfd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL8723BE_SW_H__
-#define __RTL8723BE_SW_H__
-
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723be_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b54230433a6b..f57e8794f0ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -93,124 +93,6 @@ static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
0x3FE /* 36, +6.0dB */
};
-static const u32 ofdmswing_table[] = {
- 0x0b40002d, /* 0, -15.0dB */
- 0x0c000030, /* 1, -14.5dB */
- 0x0cc00033, /* 2, -14.0dB */
- 0x0d800036, /* 3, -13.5dB */
- 0x0e400039, /* 4, -13.0dB */
- 0x0f00003c, /* 5, -12.5dB */
- 0x10000040, /* 6, -12.0dB */
- 0x11000044, /* 7, -11.5dB */
- 0x12000048, /* 8, -11.0dB */
- 0x1300004c, /* 9, -10.5dB */
- 0x14400051, /* 10, -10.0dB */
- 0x15800056, /* 11, -9.5dB */
- 0x16c0005b, /* 12, -9.0dB */
- 0x18000060, /* 13, -8.5dB */
- 0x19800066, /* 14, -8.0dB */
- 0x1b00006c, /* 15, -7.5dB */
- 0x1c800072, /* 16, -7.0dB */
- 0x1e400079, /* 17, -6.5dB */
- 0x20000080, /* 18, -6.0dB */
- 0x22000088, /* 19, -5.5dB */
- 0x24000090, /* 20, -5.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x288000a2, /* 22, -4.0dB */
- 0x2ac000ab, /* 23, -3.5dB */
- 0x2d4000b5, /* 24, -3.0dB */
- 0x300000c0, /* 25, -2.5dB */
- 0x32c000cb, /* 26, -2.0dB */
- 0x35c000d7, /* 27, -1.5dB */
- 0x390000e4, /* 28, -1.0dB */
- 0x3c8000f2, /* 29, -0.5dB */
- 0x40000100, /* 30, +0dB */
- 0x43c0010f, /* 31, +0.5dB */
- 0x47c0011f, /* 32, +1.0dB */
- 0x4c000130, /* 33, +1.5dB */
- 0x50800142, /* 34, +2.0dB */
- 0x55400155, /* 35, +2.5dB */
- 0x5a400169, /* 36, +3.0dB */
- 0x5fc0017f, /* 37, +3.5dB */
- 0x65400195, /* 38, +4.0dB */
- 0x6b8001ae, /* 39, +4.5dB */
- 0x71c001c7, /* 40, +5.0dB */
- 0x788001e2, /* 41, +5.5dB */
- 0x7f8001fe /* 42, +6.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
-};
-
static const u32 edca_setting_dl[PEER_MAX] = {
0xa44f, /* 0 UNKNOWN */
0x5ea44f, /* 1 REALTEK_90 */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index e11e496b7277..c269942b3f4a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -151,123 +151,115 @@ enum rtl8821a_h2c_cmd {
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_WOWLAN_ALL_PKT_DROP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 4, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(4))
#define SET_8812_H2CCMD_WOWLAN_GPIO_ACTIVE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 5, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(5))
#define SET_8812_H2CCMD_WOWLAN_REKEY_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
#define SET_8812_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 7, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(7))
#define SET_8812_H2CCMD_WOWLAN_GPIONUM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8812_H2CCMD_WOWLAN_GPIO_DURATION(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __value)
-#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+ *(u8 *)(__cmd + 5) = __value
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
/* Keep Alive Control*/
#define SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/*REMOTE_WAKE_CTRL */
#define SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_REALWOWV2_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
/* GTK_OFFLOAD */
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/* AOAC_RSVDPAGE_LOC */
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+5, 0, 8, __value)
+ *(u8 *)(__cmd + 5) = __value
/* Disconnect_Decision_Control */
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_USER_SETTING(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_CHECK_PERIOD(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) /* unit: beacon period */
+ *(u8 *)(__cmd + 1) = __value /* unit: beacon period */
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_TRYPKT_NUM(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1)
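All eight single-bit WOWLAN setters above target the same command byte; because u8p_replace_bits() rewrites only its own mask, the flags compose in any order without clobbering one another, while the byte-wide parameters stay plain stores. A short illustrative sketch (values made up):

	u8 wowlan[4] = {};

	SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(wowlan, 1);       /* byte 0, bit 0 */
	SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(wowlan, 1);  /* byte 0, bit 2; bit 0 kept */
	SET_8812_H2CCMD_WOWLAN_GPIONUM(wowlan, 5);           /* whole byte 1 */
	/* wowlan[0] == 0x05, wowlan[1] == 0x05 */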
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 3def6a2b3450..d8df816753cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -10,7 +10,6 @@
#include "dm.h"
#include "hw.h"
#include "fw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -65,7 +64,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
}
/*InitializeVariables8812E*/
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -211,7 +210,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -228,7 +227,7 @@ void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8821ae_get_btc_status(void)
+static bool rtl8821ae_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
deleted file mode 100644
index 9d7610f84b20..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2010 Realtek Corporation.*/
-
-#ifndef __RTL8821AE_SW_H__
-#define __RTL8821AE_SW_H__
-
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
-bool rtl8821ae_get_btc_status(void);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 3bdda1c98339..1cff9f07c9e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2898,121 +2898,6 @@ enum bt_radio_shared {
* 2. Before write integer to IO.
* 3. After read integer from IO.
****************************************/
-/* Convert little data endian to host ordering */
-#define EF1BYTE(_val) \
- ((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
-#define BIT_LEN_MASK_8(__bitlen) \
- (0xFF >> (8 - (__bitlen)))
-
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
-/*Description:
- * Return 4-byte value in host byte ordering from
- * 4-byte pointer in little-endian system.
- */
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
-#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
- (EF1BYTE(*((u8 *)(__pstart))))
-
-/*Description:
- * Translate subfield (continuous bits in little-endian) of 4-byte
- * value to host byte ordering.
- */
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-/* Description:
- * Mask subfield (continuous bits in little-endian) of 4-byte value
- * and return the result in 4-byte value in host byte ordering.
- */
-#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
- )
-
-/* Description:
- * Set subfield of little-endian 4-byte value to specified value.
- */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le32 *)(__pstart)) = \
- cpu_to_le32( \
- LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le16 *)(__pstart)) = \
- cpu_to_le16( \
- LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u8 *)(__pstart)) = EF1BYTE \
- ( \
- LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
- )
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
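With every caller converted, the private byte-order and bit-field helpers can be dropped from wifi.h; <linux/bitfield.h> provides typed, mask-based accessors in both directions. A minimal round-trip sketch of the correspondence (old calls shown in comments):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	static u32 demo_roundtrip(void)
	{
		__le32 dword = cpu_to_le32(0);

		/* was: SET_BITS_TO_LE_4BYTE(&dword, 8, 4, 0xA) */
		le32p_replace_bits(&dword, 0xA, GENMASK(11, 8));

		/* was: LE_BITS_TO_4BYTE(&dword, 8, 4) */
		return le32_get_bits(dword, GENMASK(11, 8)); /* 0xA */
	}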
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 15e12155a04c..cac148d13cf1 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -15,6 +15,7 @@ rtw88-y += main.o \
ps.o \
sec.o \
bf.o \
+ wow.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index cd28f675e9cb..a0f36f29b4a6 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -18,6 +18,7 @@ enum rtw_debug_mask {
RTW_DBG_DEBUGFS = 0x00000200,
RTW_DBG_PS = 0x00000400,
RTW_DBG_BF = 0x00000800,
+ RTW_DBG_WOW = 0x00001000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b8c581161f61..243441453ead 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -10,6 +10,7 @@
#include "sec.h"
#include "debug.h"
#include "util.h"
+#include "wow.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -482,6 +483,96 @@ void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_keep_alive_para mode = {
+ .adopt = true,
+ .pkt_type = KEEP_ALIVE_NULL_PKT,
+ .period = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
+ SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
+ SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
+ SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
+ SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_disconnect_para mode = {
+ .adopt = true,
+ .period = 30,
+ .retry_count = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);
+
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
+ SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
+ SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
+ SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
+ SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);
+
+ SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+ SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
+ SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
+ SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (rtw_wow->pattern_cnt)
+ SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);
+
+ SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
+ SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);
+
+ SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);
+
+ if (rtw_wow_no_link(rtwdev))
+ SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
enum rtw_rsvd_packet_type type)
{
@@ -496,6 +587,26 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_nlo;
+
+ loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);
+
+ SET_NLO_FUN_EN(h2c_pkt, enable);
+ if (enable) {
+ if (rtw_fw_lps_deep_mode)
+ SET_NLO_PS_32K(h2c_pkt, enable);
+ SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
+ SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -510,10 +621,45 @@ void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+ LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u8 location = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ location = rsvd_pkt->page;
+ }
+
+ return location;
+}
+
+u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u16 size = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ size = rsvd_pkt->skb->len;
+ }
+
+ return size;
+}
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -559,6 +705,95 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct rtw_nlo_info_hdr *nlo_hdr;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ u8 *pos, loc;
+ u32 size;
+ int i;
+
+ if (!pno_req->inited || !pno_req->match_set_cnt)
+ return NULL;
+
+ size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
+ IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));
+
+ nlo_hdr->nlo_count = pno_req->match_set_cnt;
+ nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;
+
+ /* pattern check for firmware */
+ memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);
+
+ for (i = 0; i < pno_req->match_set_cnt; i++)
+ nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ ssid = &pno_req->match_sets[i].ssid;
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
+ kfree_skb(skb);
+ return NULL;
+ }
+ nlo_hdr->location[i] = loc;
+ }
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
+ memcpy(pos, pno_req->match_sets[i].ssid.ssid,
+ pno_req->match_sets[i].ssid.ssid_len);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct ieee80211_channel *channels = pno_req->channels;
+ struct sk_buff *skb;
+ int count = pno_req->channel_cnt;
+ u8 *pos;
+ int i = 0;
+
+ skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ for (i = 0; i < count; i++) {
+ pos = skb_put_zero(skb, 4);
+
+ CHSW_INFO_SET_CH(pos, channels[i].hw_value);
+
+ if (channels[i].flags & IEEE80211_CHAN_RADAR)
+ CHSW_INFO_SET_ACTION_ID(pos, 0);
+ else
+ CHSW_INFO_SET_ACTION_ID(pos, 1);
+ CHSW_INFO_SET_TIMEOUT(pos, 1);
+ CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
+ CHSW_INFO_SET_BW(pos, 0);
+ }
+
+ return skb;
+}
+
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -591,6 +826,7 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
struct sk_buff *skb;
u32 size;
@@ -605,19 +841,22 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
pg_info_hdr->sec_cam_count =
rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+ pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;
conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+ conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;
return skb;
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum rtw_rsvd_packet_type type)
+ struct rtw_rsvd_page *rsvd_pkt)
{
struct sk_buff *skb_new;
+ struct cfg80211_ssid *ssid;
- switch (type) {
+ switch (rsvd_pkt->type) {
case RSVD_BEACON:
skb_new = rtw_beacon_get(hw, vif);
break;
@@ -639,6 +878,21 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_LPS_PG_INFO:
skb_new = rtw_lps_pg_info_get(hw, vif);
break;
+ case RSVD_PROBE_REQ:
+ ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
+ if (ssid)
+ skb_new = ieee80211_probereq_get(hw, vif->addr,
+ ssid->ssid,
+ ssid->ssid_len, 0);
+ else
+ skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
+ break;
+ case RSVD_NLO_INFO:
+ skb_new = rtw_nlo_info_get(hw);
+ break;
+ case RSVD_CH_INFO:
+ skb_new = rtw_cs_channel_info_get(hw);
+ break;
default:
return NULL;
}
@@ -680,25 +934,53 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
memcpy(buf, skb->data, skb->len);
}
+static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
+ enum rtw_rsvd_packet_type type,
+ bool txdesc)
+{
+ struct rtw_rsvd_page *rsvd_pkt = NULL;
+
+ rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+
+ if (!rsvd_pkt)
+ return NULL;
+
+ rsvd_pkt->type = type;
+ rsvd_pkt->add_txdesc = txdesc;
+
+ return rsvd_pkt;
+}
+
+static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_rsvd_page *rsvd_pkt)
+{
+ lockdep_assert_held(&rtwdev->mutex);
+ list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+}
+
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
- lockdep_assert_held(&rtwdev->mutex);
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
+ if (!rsvd_pkt)
+ return;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- if (rsvd_pkt->type == type)
- return;
- }
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+}
- rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
if (!rsvd_pkt)
return;
- rsvd_pkt->type = type;
- rsvd_pkt->add_txdesc = txdesc;
- list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+ rsvd_pkt->ssid = ssid;
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
}
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
@@ -795,7 +1077,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page_margin = page_size - tx_desc_sz;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
+ iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
if (!iter) {
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
@@ -857,13 +1139,16 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
}
return buf;
release_skb:
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
+ }
return NULL;
}
@@ -973,3 +1258,81 @@ out:
rtw_write8(rtwdev, REG_RCR + 2, rcr);
return 0;
}
+
+static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
+ u8 location)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
+ UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
+
+ /* include txdesc size */
+ UPDATE_PKT_SET_SIZE(h2c_pkt, size);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ u8 loc;
+ u32 size;
+
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
+ return;
+ }
+
+ size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
+ if (!size) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
+ return;
+ }
+
+ __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
+}
+
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
+ u8 loc_ch_info;
+ const struct rtw_ch_switch_option cs_option = {
+ .dest_ch_en = 1,
+ .dest_ch = 1,
+ .periodic_option = 2,
+ .normal_period = 5,
+ .normal_period_sel = 0,
+ .normal_cycle = 10,
+ .slow_period = 1,
+ .slow_period_sel = 1,
+ };
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+
+ CH_SWITCH_SET_START(h2c_pkt, enable);
+ CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
+ CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
+ CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
+ CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
+ CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
+ CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
+ CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
+ CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);
+
+ CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
+ CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);
+
+ loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
+ CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
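The channel-info reserved page consumed by this command is built by rtw_cs_channel_info_get(), which this hunk does not show. Matching CH_SWITCH_SET_INFO_SIZE() above, each channel occupies one 4-byte entry; a minimal sketch of such an encoder, whose field values are assumptions rather than the patch's actual choices, might be:

	static void rtw_fill_ch_info_sketch(struct rtw_pno_request *pno_req,
					    u8 *buf)
	{
		int i;

		for (i = 0; i < pno_req->channel_cnt; i++) {
			u8 *entry = buf + i * 4; /* one dword per channel */

			CHSW_INFO_SET_CH(entry, pno_req->channels[i].hw_value);
			CHSW_INFO_SET_PRI_CH_IDX(entry, 1);	/* assumed */
			CHSW_INFO_SET_BW(entry, 0);	/* assume 20 MHz */
			CHSW_INFO_SET_TIMEOUT(entry, 1);	/* assumed */
			CHSW_INFO_SET_ACTION_ID(entry, 0);	/* assumed */
		}
	}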
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 73d1b9ca8efc..ccd27bd45775 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -12,6 +12,8 @@
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
+#define FW_NLO_INFO_CHECK_SIZE 4
+
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
#define RSVD_PAGE_START_ADDR 0x780
@@ -45,6 +47,9 @@ enum rtw_rsvd_packet_type {
RSVD_QOS_NULL,
RSVD_LPS_PG_DPK,
RSVD_LPS_PG_INFO,
+ RSVD_PROBE_REQ,
+ RSVD_NLO_INFO,
+ RSVD_CH_INFO,
};
enum rtw_fw_rf_type {
@@ -98,6 +103,57 @@ struct rtw_rsvd_page {
enum rtw_rsvd_packet_type type;
u8 page;
bool add_txdesc;
+ struct cfg80211_ssid *ssid;
+};
+
+enum rtw_keep_alive_pkt_type {
+ KEEP_ALIVE_NULL_PKT = 0,
+ KEEP_ALIVE_ARP_RSP = 1,
+};
+
+struct rtw_nlo_info_hdr {
+ u8 nlo_count;
+ u8 hidden_ap_count;
+ u8 rsvd1[2];
+ u8 pattern_check[FW_NLO_INFO_CHECK_SIZE];
+ u8 rsvd2[8];
+ u8 ssid_len[16];
+ u8 chiper[16];
+ u8 rsvd3[16];
+ u8 location[8];
+} __packed;
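How this header gets filled lives in rtw_nlo_info_get(), outside this hunk. Under the obvious reading of the fields (one ssid_len entry and one rsvd-page location per match set), a hedged sketch could be:

	struct rtw_nlo_info_hdr *hdr = (struct rtw_nlo_info_hdr *)buf;
	int i;

	hdr->nlo_count = pno_req->match_set_cnt;
	hdr->hidden_ap_count = 0;	/* assumption: no hidden APs */

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;
		/* loc[] is hypothetical: the rsvd-page location where the
		 * corresponding SSID payload was placed
		 */
		hdr->location[i] = loc[i];
	}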
+
+enum rtw_packet_type {
+ RTW_PACKET_PROBE_REQ = 0x00,
+
+ RTW_PACKET_UNDEFINE = 0x7FFFFFFF,
+};
+
+struct rtw_fw_wow_keep_alive_para {
+ bool adopt;
+ u8 pkt_type;
+ u8 period; /* unit: sec */
+};
+
+struct rtw_fw_wow_disconnect_para {
+ bool adopt;
+ u8 period; /* unit: sec */
+ u8 retry_count;
+};
+
+struct rtw_ch_switch_option {
+ u8 periodic_option;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 dest_ch_en;
+ u8 absolute_time_en;
+ u8 dest_ch;
+ u8 normal_period;
+ u8 normal_period_sel;
+ u8 normal_cycle;
+ u8 slow_period;
+ u8 slow_period_sel;
+ u8 nlo_en;
};
struct rtw_fw_hdr {
@@ -146,6 +202,12 @@ struct rtw_fw_hdr {
#define H2C_PKT_PHYDM_INFO 0x11
#define H2C_PKT_IQK 0x0E
+#define H2C_PKT_CH_SWITCH 0x02
+#define H2C_PKT_UPDATE_PKT 0x0C
+
+#define H2C_PKT_CH_SWITCH_LEN 0x20
+#define H2C_PKT_UPDATE_PKT_LEN 0x4
+
#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
@@ -182,6 +244,57 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CHSW_INFO_SET_CH(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(7, 0))
+#define CHSW_INFO_SET_PRI_CH_IDX(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(11, 8))
+#define CHSW_INFO_SET_BW(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(15, 12))
+#define CHSW_INFO_SET_TIMEOUT(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16))
+#define CHSW_INFO_SET_ACTION_ID(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24))
+
+#define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0))
+#define UPDATE_PKT_SET_PKT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define UPDATE_PKT_SET_LOCATION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 24))
+
+#define CH_SWITCH_SET_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CH_SWITCH_SET_ABSOLUTE_TIME(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2))
+#define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3))
+#define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0))
+#define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(13, 8))
+#define CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 14))
+#define CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(21, 16))
+#define CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 22))
+#define CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(31, 24))
+#define CH_SWITCH_SET_TSF_HIGH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_TSF_LOW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0))
+
/* Command H2C */
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
@@ -198,6 +311,13 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
+#define H2C_CMD_KEEP_ALIVE 0x03
+#define H2C_CMD_DISCONNECT_DECISION 0x04
+#define H2C_CMD_WOWLAN 0x80
+#define H2C_CMD_REMOTE_WAKE_CTRL 0x81
+#define H2C_CMD_AOAC_GLOBAL_INFO 0x82
+#define H2C_CMD_NLO_INFO 0x8C
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -224,6 +344,8 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define LPS_PG_PATTERN_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -301,6 +423,56 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+
+#define SET_WOWLAN_FUNC_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_WOWLAN_UNICAST_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(11))
+#define SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(14))
+#define SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(15))
+
+#define SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(12))
+
+#define SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_NLO_FUN_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_NLO_PS_32K(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_NLO_IGNORE_SECURITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -332,6 +504,8 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
@@ -340,4 +514,16 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc);
+
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 3d91aea942c3..85a81a578fd5 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -15,6 +15,7 @@ struct rtw_hci_ops {
void (*stop)(struct rtw_dev *rtwdev);
void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
+ void (*interface_cfg)(struct rtw_dev *rtwdev);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -59,6 +60,11 @@ static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
rtwdev->hci.ops->link_ps(rtwdev, enter);
}
+static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->interface_cfg(rtwdev);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 507970387b2a..cadf0abbe16b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -16,10 +16,12 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 value8;
txsc20 = primary_ch_idx;
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
- else
- txsc40 = 10;
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ if (txsc20 == 1 || txsc20 == 3)
+ txsc40 = 9;
+ else
+ txsc40 = 10;
+ }
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -1034,5 +1036,7 @@ int rtw_mac_init(struct rtw_dev *rtwdev)
if (ret)
return ret;
+ rtw_hci_interface_cfg(rtwdev);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 34a1c3b53cd4..6fc33e11d08c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -12,6 +12,7 @@
#include "reg.h"
#include "bf.h"
#include "debug.h"
+#include "wow.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -152,12 +153,10 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
u8 bcn_ctrl = 0;
rtwvif->port = port;
- rtwvif->vif = vif;
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
- rtwvif->in_lps = false;
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
@@ -735,6 +734,44 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_PM
+static int rtw_ops_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_suspend(rtwdev, wowlan);
+ if (ret)
+ rtw_err(rtwdev, "failed to suspend for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
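+	/* a return of 1 makes mac80211 fall back to a normal suspend */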
+ return ret ? 1 : 0;
+}
+
+static int rtw_ops_resume(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_resume(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to resume for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
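+	/* a return of 1 makes mac80211 go through a full HW restart */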
+ return ret ? 1 : 0;
+}
+
+static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ device_set_wakeup_enable(rtwdev->dev, enabled);
+}
+#endif
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -757,5 +794,10 @@ const struct ieee80211_ops rtw_ops = {
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
+#ifdef CONFIG_PM
+ .suspend = rtw_ops_suspend,
+ .resume = rtw_ops_resume,
+ .set_wakeup = rtw_ops_set_wakeup,
+#endif
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index ae61415e1665..2845d2838f7b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -706,8 +706,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
is_support_sgi = true;
} else if (sta->ht_cap.ht_supported) {
- ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
- (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12);
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
@@ -717,6 +717,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
is_support_sgi = true;
}
+ if (efuse->hw_cap.nss == 1)
+ ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
if (sta->vht_cap.vht_supported) {
@@ -750,11 +753,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
wireless_set = 0;
}
- if (efuse->hw_cap.nss == 1) {
- ra_mask &= RA_MASK_VHT_RATES_1SS;
- ra_mask &= RA_MASK_HT_RATES_1SS;
- }
-
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
@@ -793,6 +791,26 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rtw_fw_send_ra_info(rtwdev, si);
}
+static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw;
+
+ fw = &rtwdev->fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+
+ if (chip->wow_fw_name) {
+ fw = &rtwdev->wow_fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -813,11 +831,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
goto err;
}
- wait_for_completion(&fw->completion);
- if (!fw->firmware) {
- ret = -EINVAL;
- rtw_err(rtwdev, "failed to load firmware\n");
- goto err;
+ ret = rtw_wait_firmware_completion(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to wait firmware completion\n");
+ goto err_off;
}
ret = rtw_download_firmware(rtwdev, fw);
@@ -881,7 +898,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
static void rtw_power_off(struct rtw_dev *rtwdev)
{
- rtwdev->hci.ops->stop(rtwdev);
+ rtw_hci_stop(rtwdev);
rtw_mac_power_off(rtwdev);
}
@@ -1020,8 +1037,8 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
- struct rtw_dev *rtwdev = context;
- struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *fw = context;
+ struct rtw_dev *rtwdev = fw->rtwdev;
const struct rtw_fw_hdr *fw_hdr;
if (!firmware || !firmware->data) {
@@ -1043,17 +1060,35 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
-static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
+static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
{
- struct rtw_fw_state *fw = &rtwdev->fw;
+ const char *fw_name;
+ struct rtw_fw_state *fw;
int ret;
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ fw_name = rtwdev->chip->wow_fw_name;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ fw_name = rtwdev->chip->fw_name;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type\n");
+ return -ENOENT;
+ }
+
+ fw->rtwdev = rtwdev;
init_completion(&fw->completion);
ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
- GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
+ GFP_KERNEL, fw, rtw_load_firmware_cb);
if (ret) {
- rtw_err(rtwdev, "async firmware request failed\n");
+ rtw_err(rtwdev, "failed to async firmware request\n");
return ret;
}
@@ -1341,7 +1376,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
- spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
spin_lock_init(&rtwdev->txq_lock);
@@ -1372,12 +1406,19 @@ int rtw_core_init(struct rtw_dev *rtwdev)
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
BIT_AB | BIT_AM | BIT_APM;
- ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
+ ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
return ret;
}
+ if (chip->wow_fw_name) {
+ ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_warn(rtwdev, "no wow firmware loaded\n");
+ return ret;
+ }
+ }
return 0;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -1385,12 +1426,16 @@ EXPORT_SYMBOL(rtw_core_init);
void rtw_core_deinit(struct rtw_dev *rtwdev)
{
struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *wow_fw = &rtwdev->wow_fw;
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
if (fw->firmware)
release_firmware(fw->firmware);
+ if (wow_fw->firmware)
+ release_firmware(wow_fw->firmware);
+
tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
@@ -1445,6 +1490,10 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+ hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids;
+#endif
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index d012eefcd0da..f334d201bfb5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -19,6 +19,10 @@
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
+#define RTW_MAX_PATTERN_NUM 12
+#define RTW_MAX_PATTERN_MASK_SIZE 16
+#define RTW_MAX_PATTERN_SIZE 128
+
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
#define RFREG_MASK 0xfffff
@@ -193,6 +197,11 @@ enum rtw_rx_queue_type {
RTK_MAX_RX_QUEUE_NUM
};
+enum rtw_fw_type {
+ RTW_NORMAL_FW = 0x0,
+ RTW_WOWLAN_FW = 0x1,
+};
+
enum rtw_rate_index {
RTW_RATEID_BGN_40M_2SS = 0,
RTW_RATEID_BGN_40M_1SS = 1,
@@ -337,6 +346,7 @@ enum rtw_flags {
RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
+ RTW_FLAG_WOWLAN,
NUM_OF_RTW_FLAGS,
};
@@ -367,6 +377,15 @@ enum rtw_snr {
RTW_SNR_NUM
};
+enum rtw_wow_flags {
+ RTW_WOW_FLAG_EN_MAGIC_PKT,
+ RTW_WOW_FLAG_EN_REKEY_PKT,
+ RTW_WOW_FLAG_EN_DISCONNECT,
+
+ /* keep it last */
+ RTW_WOW_FLAG_MAX,
+};
+
/* the power index is represented by differences, which cck-1s & ht40-1s are
* the base values, so for 1s's differences, there are only ht20 & ofdm
*/
@@ -608,6 +627,7 @@ struct rtw_lps_conf {
u8 smart_ps;
u8 port_id;
bool sec_cam_backup;
+ bool pattern_cam_backup;
};
enum rtw_hw_key_type {
@@ -716,7 +736,6 @@ struct rtw_bf_info {
};
struct rtw_vif {
- struct ieee80211_vif *vif;
enum rtw_net_type net_type;
u16 aid;
u8 mac_addr[ETH_ALEN];
@@ -727,7 +746,6 @@ struct rtw_vif {
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
- bool in_lps;
struct rtw_bfee bfee;
};
@@ -902,6 +920,33 @@ struct rtw_intf_phy_para {
u16 platform;
};
+struct rtw_wow_pattern {
+ u16 crc;
+ u8 type;
+ u8 valid;
+ u8 mask[RTW_MAX_PATTERN_MASK_SIZE];
+};
+
+struct rtw_pno_request {
+ bool inited;
+ u32 match_set_cnt;
+ struct cfg80211_match_set *match_sets;
+ u8 channel_cnt;
+ struct ieee80211_channel *channels;
+ struct cfg80211_sched_scan_plan scan_plan;
+};
+
+struct rtw_wow_param {
+ struct ieee80211_vif *wow_vif;
+ DECLARE_BITMAP(flags, RTW_WOW_FLAG_MAX);
+ u8 txpause;
+ u8 pattern_cnt;
+ struct rtw_wow_pattern patterns[RTW_MAX_PATTERN_NUM];
+
+ bool ips_enabled;
+ struct rtw_pno_request pno_req;
+};
+
struct rtw_intf_phy_para_table {
struct rtw_intf_phy_para *usb2_para;
struct rtw_intf_phy_para *usb3_para;
@@ -1030,6 +1075,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ const char *wow_fw_name;
+ const struct wiphy_wowlan_support *wowlan_stub;
+ const u8 max_sched_scan_ssids;
+
/* coex paras */
u32 coex_para_ver;
u8 bt_desired_ver;
@@ -1456,6 +1505,7 @@ struct rtw_fifo_conf {
struct rtw_fw_state {
const struct firmware *firmware;
+ struct rtw_dev *rtwdev;
struct completion completion;
u16 version;
u8 sub_version;
@@ -1534,9 +1584,6 @@ struct rtw_dev {
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
- /* lock for dm to use */
- spinlock_t dm_lock;
-
/* read/write rf register */
spinlock_t rf_lock;
@@ -1580,6 +1627,9 @@ struct rtw_dev {
u8 mp_mode;
+ struct rtw_fw_state wow_fw;
+ struct rtw_wow_param wow;
+
/* hci related data, must be last */
u8 priv[0] __aligned(sizeof(void *));
};
@@ -1605,6 +1655,18 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
+ struct cfg80211_ssid *b)
+{
+ if (!a || !b || a->ssid_len != b->ssid_len)
+ return false;
+
+ if (memcmp(a->ssid, b->ssid, a->ssid_len))
+ return false;
+
+ return true;
+}
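On the fw.c side, rtw_ssid_equal() is what lets rtw_get_rsvd_page_probe_req_location() (not shown in this diff) find the reserved page carrying a given probe request. A plausible sketch of that lookup, offered as an assumption rather than the actual body:

	struct rtw_rsvd_page *rsvd_pkt;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		/* a NULL ssid selects the wildcard probe request */
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			return rsvd_pkt->page;
	}
	return 0;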
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a58e8276a41a..1fbc14c149ec 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
+#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
@@ -486,13 +487,6 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtwpci->irq_enabled = false;
}
-static int rtw_pci_setup(struct rtw_dev *rtwdev)
-{
- rtw_pci_reset_trx_ring(rtwdev);
-
- return 0;
-}
-
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
/* reset dma and rx tag */
@@ -501,11 +495,22 @@ static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
rtwpci->rx_tag = 0;
}
+static int rtw_pci_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_pci_reset_trx_ring(rtwdev);
+ rtw_pci_dma_reset(rtwdev, rtwpci);
+
+ return 0;
+}
+
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
struct rtw_pci_tx_ring *tx_ring;
u8 queue;
+ rtw_pci_reset_trx_ring(rtwdev);
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
tx_ring = &rtwpci->tx_rings[queue];
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
@@ -517,8 +522,6 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long flags;
- rtw_pci_dma_reset(rtwdev, rtwpci);
-
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
@@ -832,6 +835,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
while (count--) {
skb = skb_dequeue(&ring->queue);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
+ count, hw_queue, bd_idx, ring->r.rp, cur_rp);
+ break;
+ }
tx_data = rtw_pci_get_tx_data(skb);
pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
PCI_DMA_TODEVICE);
@@ -1222,6 +1230,21 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
rtwpci->link_ctrl = link_ctrl;
}
+static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ switch (chip->id) {
+ case RTW_CHIP_TYPE_8822C:
+ if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
+ rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
+ BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1264,6 +1287,23 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_pci_link_cfg(rtwdev);
}
+#ifdef CONFIG_PM
+static int rtw_pci_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int rtw_pci_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+#define RTW_PM_OPS (&rtw_pm_ops)
+#else
+#define RTW_PM_OPS NULL
+#endif
+
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
int ret;
@@ -1330,6 +1370,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.stop = rtw_pci_stop,
.deep_ps = rtw_pci_deep_ps,
.link_ps = rtw_pci_link_ps,
+ .interface_cfg = rtw_pci_interface_cfg,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1489,6 +1530,7 @@ static struct pci_driver rtw_pci_driver = {
.id_table = rtw_pci_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
+ .driver.pm = RTW_PM_OPS,
};
module_pci_driver(rtw_pci_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 49bf29a92152..1580cfc57361 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -210,7 +210,7 @@ struct rtw_pci {
void __iomem *mmap;
};
-static u32 max_num_of_tx_queue(u8 queue)
+static inline u32 max_num_of_tx_queue(u8 queue)
{
u32 max_num;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index a3e1e9578b65..eea9d888fbf1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1434,7 +1434,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
rtw_load_table(rtwdev, chip->rfk_init_tbl);
- dpk_info->is_dpk_pwr_on = 1;
+ dpk_info->is_dpk_pwr_on = true;
}
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 913e6f47130f..7a189a9926fe 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -91,11 +91,11 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
return;
/* check confirm power mode has left power save state */
- for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ for (polling_cnt = 0; polling_cnt < 50; polling_cnt++) {
polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
return;
- mdelay(20);
+ udelay(100);
}
/* in case of fw/hw missed the request, retry */
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 7e817bc997eb..9d94534c9674 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -9,6 +9,8 @@
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
+#define BIT_R_DIS_PRST BIT(6)
+#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
@@ -160,8 +162,12 @@
#define REG_CR 0x0100
#define REG_TRXFF_BNDY 0x0114
#define REG_RXFF_BNDY 0x011C
+#define REG_FE1IMR 0x0120
+#define BIT_FS_RXDONE BIT(16)
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
+#define REG_MCUTST_II 0x01C4
+#define REG_WOWLAN_WAKE_REASON 0x01C7
#define REG_HMETFR 0x01CC
#define REG_HMEBOX0 0x01D0
#define REG_HMEBOX1 0x01D4
@@ -192,9 +198,18 @@
#define REG_H2C_TAIL 0x0248
#define REG_H2C_READ_ADDR 0x024C
#define REG_H2C_INFO 0x0254
+#define REG_RXPKT_NUM 0x0284
+#define BIT_RXDMA_REQ BIT(19)
+#define BIT_RW_RELEASE BIT(18)
+#define BIT_RXDMA_IDLE BIT(17)
+#define REG_RXPKTNUM 0x02B0
#define REG_INT_MIG 0x0304
+#define REG_HCI_MIX_CFG 0x03FC
+#define BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK BIT(26)
+#define REG_BCNQ_INFO 0x0418
+#define BIT_MGQ_CPU_EMPTY BIT(24)
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
@@ -323,6 +338,20 @@
#define BIT_RFMOD_80M BIT(8)
#define BIT_RFMOD_40M BIT(7)
#define REG_WMAC_TRXPTCL_CTL_H 0x066C
+#define REG_WKFMCAM_CMD 0x0698
+#define BIT_WKFCAM_POLLING_V1 BIT(31)
+#define BIT_WKFCAM_CLR_V1 BIT(30)
+#define BIT_WKFCAM_WE BIT(16)
+#define BIT_SHIFT_WKFCAM_ADDR_V2 8
+#define BIT_MASK_WKFCAM_ADDR_V2 0xff
+#define BIT_WKFCAM_ADDR_V2(x) \
+ (((x) & BIT_MASK_WKFCAM_ADDR_V2) << BIT_SHIFT_WKFCAM_ADDR_V2)
+#define REG_WKFMCAM_RWD 0x069C
+#define BIT_WKFMCAM_VALID BIT(31)
+#define BIT_WKFMCAM_BC BIT(26)
+#define BIT_WKFMCAM_MC BIT(25)
+#define BIT_WKFMCAM_UC BIT(24)
+
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 174029836833..3865097696d4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3487,12 +3487,12 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, BIT(7), 0},
- {0x0005,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x1018,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -4060,6 +4060,18 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = RTW_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+ .max_nd_match_sets = 4,
+};
+#endif
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -4106,6 +4118,11 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+#ifdef CONFIG_PM
+ .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
+ .wowlan_stub = &rtw_wowlan_stub_8822c,
+ .max_sched_scan_ssids = 4,
+#endif
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
.scbd_support = true,
@@ -4135,3 +4152,4 @@ struct rtw_chip_info rtw8822c_hw_spec = {
EXPORT_SYMBOL(rtw8822c_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
+MODULE_FIRMWARE("rtw88/rtw8822c_wow_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
index 7bd2843b0bce..41c10e7144df 100644
--- a/drivers/net/wireless/realtek/rtw88/util.h
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -15,6 +15,8 @@ struct rtw_dev;
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
+#define rtw_iterate_keys(rtwdev, vif, iterator, data) \
+ ieee80211_iter_keys(rtwdev->hw, vif, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
new file mode 100644
index 000000000000..af5c27e1bb07
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "wow.h"
+#include "reg.h"
+#include "debug.h"
+#include "mac.h"
+#include "ps.h"
+
+static void rtw_wow_show_wakeup_reason(struct rtw_dev *rtwdev)
+{
+ u8 reason;
+
+ reason = rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON);
+
+ if (reason == RTW_WOW_RSN_RX_DEAUTH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx deauth\n");
+ else if (reason == RTW_WOW_RSN_DISCONNECT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: AP is off\n");
+ else if (reason == RTW_WOW_RSN_RX_MAGIC_PKT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx magic packet\n");
+ else if (reason == RTW_WOW_RSN_RX_GTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx gtk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx ptk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PATTERN_MATCH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx pattern match packet\n");
+ else if (reason == RTW_WOW_RSN_RX_NLO)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "Rx NLO\n");
+ else
+ rtw_warn(rtwdev, "Unknown wakeup reason %x\n", reason);
+}
+
+static void rtw_wow_pattern_write_cam(struct rtw_dev *rtwdev, u8 addr,
+ u32 wdata)
+{
+ rtw_write32(rtwdev, REG_WKFMCAM_RWD, wdata);
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_WE | BIT_WKFCAM_ADDR_V2(addr));
+
+ if (!check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0))
+ rtw_err(rtwdev, "failed to write pattern cam\n");
+}
+
+static void rtw_wow_pattern_write_cam_ent(struct rtw_dev *rtwdev, u8 id,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ int i;
+ u8 addr;
+ u32 wdata;
+
+ for (i = 0; i < RTW_MAX_PATTERN_MASK_SIZE / 4; i++) {
+ addr = (id << 3) + i;
+ wdata = rtw_pattern->mask[i * 4];
+ wdata |= rtw_pattern->mask[i * 4 + 1] << 8;
+ wdata |= rtw_pattern->mask[i * 4 + 2] << 16;
+ wdata |= rtw_pattern->mask[i * 4 + 3] << 24;
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+ }
+
+ wdata = rtw_pattern->crc;
+ addr = (id << 3) + RTW_MAX_PATTERN_MASK_SIZE / 4;
+
+ switch (rtw_pattern->type) {
+ case RTW_PATTERN_BROADCAST:
+ wdata |= BIT_WKFMCAM_BC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_MULTICAST:
+ wdata |= BIT_WKFMCAM_MC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_UNICAST:
+ wdata |= BIT_WKFMCAM_UC | BIT_WKFMCAM_VALID;
+ break;
+ default:
+ break;
+ }
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+}
+
+/* RTK internal CRC16 for Pattern Cam */
+static u16 __rtw_cal_crc16(u8 data, u16 crc)
+{
+ u8 shift_in, data_bit;
+ u8 crc_bit4, crc_bit11, crc_bit15;
+ u16 crc_result;
+ int index;
+
+ for (index = 0; index < 8; index++) {
+ crc_bit15 = ((crc & BIT(15)) ? 1 : 0);
+ data_bit = (data & (BIT(0) << index) ? 1 : 0);
+ shift_in = crc_bit15 ^ data_bit;
+
+ crc_result = crc << 1;
+
+ if (shift_in == 0)
+ crc_result &= (~BIT(0));
+ else
+ crc_result |= BIT(0);
+
+ crc_bit11 = ((crc & BIT(11)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit11 == 0)
+ crc_result &= (~BIT(12));
+ else
+ crc_result |= BIT(12);
+
+ crc_bit4 = ((crc & BIT(4)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit4 == 0)
+ crc_result &= (~BIT(5));
+ else
+ crc_result |= BIT(5);
+
+ crc = crc_result;
+ }
+ return crc;
+}
+
+static u16 rtw_calc_crc(u8 *pdata, int length)
+{
+ u16 crc = 0xffff;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = __rtw_cal_crc16(pdata[i], crc);
+
+	/* get the 1's complement */
+ return ~crc;
+}
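A trivial standalone use of the helper, for illustration only; the driver itself only ever feeds it the masked payload bytes collected in rtw_wow_pattern_generate() below:

	u8 content[] = { 0x08, 0x00 };	/* e.g. an IPv4 EtherType */
	u16 crc = rtw_calc_crc(content, sizeof(content));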
+
+static void rtw_wow_pattern_generate(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ const struct cfg80211_pkt_pattern *pkt_pattern,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ const u8 *mask;
+ const u8 *pattern;
+ u8 mask_hw[RTW_MAX_PATTERN_MASK_SIZE] = {0};
+ u8 content[RTW_MAX_PATTERN_SIZE] = {0};
+ u8 mac_addr[ETH_ALEN] = {0};
+ u8 mask_len;
+ u16 count;
+ int len;
+ int i;
+
+ pattern = pkt_pattern->pattern;
+ len = pkt_pattern->pattern_len;
+ mask = pkt_pattern->mask;
+
+ ether_addr_copy(mac_addr, rtwvif->mac_addr);
+ memset(rtw_pattern, 0, sizeof(*rtw_pattern));
+
+ mask_len = DIV_ROUND_UP(len, 8);
+
+ if (is_broadcast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_BROADCAST;
+ else if (is_multicast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_MULTICAST;
+ else if (ether_addr_equal(pattern, mac_addr))
+ rtw_pattern->type = RTW_PATTERN_UNICAST;
+ else
+ rtw_pattern->type = RTW_PATTERN_INVALID;
+
+	/* translate the mask from the OS into the mask for the HW
+	 * a pattern from the OS uses an 'ethernet frame', like this:
+	 * | 6 | 6 | 2 | 20 | Variable | 4 |
+	 * |--------+--------+------+-----------+------------+-----|
+	 * | 802.3 Mac Header | IP Header | TCP Packet | FCS |
+	 * | DA | SA | Type |
+	 *
+	 * BUT a packet caught by our HW is an '802.11 frame' that begins
+	 * at the LLC:
+	 * | 24 or 30 | 6 | 2 | 20 | Variable | 4 |
+	 * |-------------------+--------+------+-----------+------------+-----|
+	 * | 802.11 MAC Header | LLC | IP Header | TCP Packet | FCS |
+	 * | Others | Type |
+	 *
+	 * Therefore we need to translate mask_from_OS to mask_to_hw:
+	 * shift the mask down by 6 bits (the DA bytes), then clear the new
+	 * bit[0~5]. The new mask[0~5] would mean 'SA', but our HW packet
+	 * begins at the LLC, so bit[0~5] corresponds to the first 6 bytes
+	 * of the LLC and they just don't match.
+	 */
+
+ /* Shift 6 bits */
+ for (i = 0; i < mask_len - 1; i++) {
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+ mask_hw[i] |= u8_get_bits(mask[i + 1], GENMASK(5, 0)) << 2;
+ }
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+
+ /* Set bit 0-5 to zero */
+ mask_hw[0] &= (~GENMASK(5, 0));
+
+ memcpy(rtw_pattern->mask, mask_hw, RTW_MAX_PATTERN_MASK_SIZE);
+
+	/* To get the wakeup pattern from the mask, do not count the first
+	 * 12 bytes of the pattern (DA[6] and SA[6]), to match the HW
+	 * design.
+	 */
+ count = 0;
+ for (i = 12; i < len; i++) {
+ if ((mask[i / 8] >> (i % 8)) & 0x01) {
+ content[count] = pattern[i];
+ count++;
+ }
+ }
+
+ rtw_pattern->crc = rtw_calc_crc(content, count);
+}
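A worked example of the mask translation, illustrative only. Suppose the OS mask starts with mask[0] = 0xff and mask[1] = 0x3f, i.e. mask bits 0..13 set, covering DA, SA and the two Type bytes:

	/* u8_get_bits(0xff, GENMASK(7, 6)) = 0x3  -> mask_hw[0] bits 1..0
	 * u8_get_bits(0x3f, GENMASK(5, 0)) = 0x3f -> mask_hw[0] bits 7..2
	 * mask_hw[0] = 0x3 | (0x3f << 2)   = 0xff
	 * mask_hw[0] &= ~GENMASK(5, 0)     = 0xc0
	 *
	 * Only bits 6 and 7 survive; they are OS mask bits 12 and 13, the
	 * two Type bytes, exactly the first positions the HW can match once
	 * the DA/SA bytes are dropped.
	 */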
+
+static void rtw_wow_pattern_clear_cam(struct rtw_dev *rtwdev)
+{
+ bool ret;
+
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_CLR_V1);
+
+ ret = check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0);
+ if (!ret)
+ rtw_err(rtwdev, "failed to clean pattern cam\n");
+}
+
+static void rtw_wow_pattern_write(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_pattern = rtw_wow->patterns;
+	int i;
+
+ for (i = 0; i < rtw_wow->pattern_cnt; i++)
+ rtw_wow_pattern_write_cam_ent(rtwdev, i, rtw_pattern + i);
+}
+
+static void rtw_wow_pattern_clear(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_wow_pattern_clear_cam(rtwdev);
+
+ rtw_wow->pattern_cnt = 0;
+ memset(rtw_wow->patterns, 0, sizeof(rtw_wow->patterns));
+}
+
+static void rtw_wow_bb_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ /* wait 100ms for firmware to finish TX */
+ msleep(100);
+
+ if (!rtw_read32_mask(rtwdev, REG_BCNQ_INFO, BIT_MGQ_CPU_EMPTY))
+ rtw_warn(rtwdev, "Wrong status of MGQ_CPU empty!\n");
+
+ rtw_wow->txpause = rtw_read8(rtwdev, REG_TXPAUSE);
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
+ rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
+static void rtw_wow_bb_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+ rtw_write8(rtwdev, REG_TXPAUSE, rtw_wow->txpause);
+}
+
+static void rtw_wow_rx_dma_stop(struct rtw_dev *rtwdev)
+{
+ /* wait 100ms for HW to finish rx dma */
+ msleep(100);
+
+ rtw_write32_set(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+
+ if (!check_hw_ready(rtwdev, REG_RXPKT_NUM, BIT_RXDMA_IDLE, 1))
+ rtw_err(rtwdev, "failed to stop rx dma\n");
+}
+
+static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
+{
+ rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+}
+
+static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+{
+	int ret = 0;
+
+	/* wait 100ms for wow firmware to finish work */
+	msleep(100);
+
+	if (wow_enable) {
+		if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+			ret = -EBUSY;
+	} else {
+		if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+		    rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+			ret = -EBUSY;
+	}
+
+	if (ret)
+		rtw_err(rtwdev, "failed to check wow status %s\n",
+			wow_enable ? "enabled" : "disabled");
+
+	return ret;
+}
+
+static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw_fw_key_type_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = hw->priv;
+ u8 hw_key_type;
+
+ if (vif != rtwdev->wow.wow_vif)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW_CAM_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW_CAM_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hw_key_type = RTW_CAM_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW_CAM_AES;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ default:
+ rtw_err(rtwdev, "Unsupported key type for wowlan mode\n");
+ hw_key_type = 0;
+ break;
+ }
+
+ if (sta)
+ iter_data->pairwise_key_type = hw_key_type;
+ else
+ iter_data->group_key_type = hw_key_type;
+}
+
+static void rtw_wow_fw_security_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_key_type_iter_data data = {};
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ data.rtwdev = rtwdev;
+ rtw_iterate_keys(rtwdev, wow_vif,
+ rtw_wow_fw_security_type_iter, &data);
+ rtw_fw_set_aoac_global_info_cmd(rtwdev, data.pairwise_key_type,
+ data.group_key_type);
+}
+
+static int rtw_wow_fw_start(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_send_rsvd_page_h2c(rtwdev);
+ rtw_wow_pattern_write(rtwdev);
+ rtw_wow_fw_security_type(rtwdev);
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, true);
+ rtw_fw_set_keep_alive_cmd(rtwdev, true);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_set_nlo_info(rtwdev, true);
+ rtw_fw_update_pkt_probe_req(rtwdev, NULL);
+ rtw_fw_channel_switch(rtwdev, true);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, true);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, true);
+
+ return rtw_wow_check_fw_status(rtwdev, true);
+}
+
+static int rtw_wow_fw_stop(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, false);
+ rtw_fw_set_keep_alive_cmd(rtwdev, false);
+ rtw_wow_pattern_clear(rtwdev);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_channel_switch(rtwdev, false);
+ rtw_fw_set_nlo_info(rtwdev, false);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, false);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, false);
+
+ return rtw_wow_check_fw_status(rtwdev, false);
+}
+
+static void rtw_wow_avoid_reset_mac(struct rtw_dev *rtwdev)
+{
+	/* When resuming from wowlan mode, the host may issue a reset
+	 * signal (PCIE: PERST, USB: SE0RST) to the device, which resets
+	 * the MAC core; if that happens, the connection to the AP is
+	 * lost. Setting the REG_RSV_CTRL register avoids this.
+	 */
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ case RTW_HCI_TYPE_USB:
+ rtw_write8(rtwdev, REG_RSV_CTRL, BIT_WLOCK_1C_B6);
+ rtw_write8(rtwdev, REG_RSV_CTRL,
+ BIT_WLOCK_1C_B6 | BIT_R_DIS_PRST);
+ break;
+ default:
+ rtw_warn(rtwdev, "Unsupported hci type to disable reset MAC\n");
+ break;
+ }
+}
+
+static void rtw_wow_fw_media_status_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct rtw_fw_media_status_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+
+ rtw_fw_media_status_report(rtwdev, si->mac_id, iter_data->connect);
+}
+
+static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect)
+{
+ struct rtw_fw_media_status_iter_data data;
+
+ data.rtwdev = rtwdev;
+ data.connect = connect;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data);
+}
+
+static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
+ struct cfg80211_ssid *ssid;
+ int i;
+
+	for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
+ ssid = &rtw_pno_req->match_sets[i].ssid;
+ rtw_add_rsvd_page_probe_req(rtwdev, ssid);
+ }
+ rtw_add_rsvd_page_probe_req(rtwdev, NULL);
+ rtw_add_rsvd_page(rtwdev, RSVD_NLO_INFO, false);
+ rtw_add_rsvd_page(rtwdev, RSVD_CH_INFO, true);
+}
+
+static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
+}
+
+static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_reset_rsvd_page(rtwdev);
+
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_wow_config_linked_rsvd_page(rtwdev);
+ } else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) &&
+ rtw_wow_no_link(rtwdev)) {
+ rtw_wow_config_pno_rsvd_page(rtwdev);
+ }
+}
+
+static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ rtw_wow_config_rsvd_page(rtwdev);
+
+ return rtw_fw_download_rsvd_page(rtwdev, wow_vif);
+}
+
+static int rtw_wow_swap_fw(struct rtw_dev *rtwdev, enum rtw_fw_type type)
+{
+ struct rtw_fw_state *fw;
+ int ret;
+
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type to swap\n");
+ return -ENOENT;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret)
+ goto out;
+
+ rtw_fw_send_general_info(rtwdev);
+ rtw_fw_send_phydm_info(rtwdev);
+ rtw_wow_fw_media_status(rtwdev, true);
+
+out:
+ return ret;
+}
+
+static void rtw_wow_check_pno(struct rtw_dev *rtwdev,
+ struct cfg80211_sched_scan_request *nd_config)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+ struct ieee80211_channel *channel;
+ int i, size;
+
+ if (!nd_config->n_match_sets || !nd_config->n_channels)
+ goto err;
+
+ pno_req->match_set_cnt = nd_config->n_match_sets;
+ size = sizeof(*pno_req->match_sets) * pno_req->match_set_cnt;
+ pno_req->match_sets = kmemdup(nd_config->match_sets, size, GFP_KERNEL);
+ if (!pno_req->match_sets)
+ goto err;
+
+ pno_req->channel_cnt = nd_config->n_channels;
+ size = sizeof(*nd_config->channels[0]) * nd_config->n_channels;
+ pno_req->channels = kmalloc(size, GFP_KERNEL);
+ if (!pno_req->channels)
+ goto channel_err;
+
+	for (i = 0; i < pno_req->channel_cnt; i++) {
+ channel = pno_req->channels + i;
+ memcpy(channel, nd_config->channels[i], sizeof(*channel));
+ }
+
+ pno_req->scan_plan = *nd_config->scan_plans;
+ pno_req->inited = true;
+
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is enabled\n");
+
+ return;
+
+channel_err:
+ kfree(pno_req->match_sets);
+
+err:
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is disabled\n");
+}
+
+static int rtw_wow_leave_linked_ps(struct rtw_dev *rtwdev)
+{
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags))
+ cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+
+ rtw_leave_lps(rtwdev);
+
+ return 0;
+}
+
+static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ int ret = 0;
+
+ if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ if (rtw_fw_lps_deep_mode)
+ rtw_leave_lps_deep(rtwdev);
+ } else {
+ if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
+ rtw_wow->ips_enabled = true;
+ ret = rtw_leave_ips(rtwdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw_wow_leave_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_leave_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev))
+ ret = rtw_wow_leave_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_restore_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_no_link(rtwdev) && rtwdev->wow.ips_enabled)
+ ret = rtw_enter_ips(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_enter_linked_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ rtw_enter_lps(rtwdev, rtwvif->port);
+
+ return 0;
+}
+
+static int rtw_wow_enter_no_link_ps(struct rtw_dev *rtwdev)
+{
+ /* firmware enters deep ps by itself if supported */
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+
+ return 0;
+}
+
+static int rtw_wow_enter_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_enter_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev) && rtw_fw_lps_deep_mode)
+ ret = rtw_wow_enter_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_stop_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_bb_stop(rtwdev);
+ rtw_wow_rx_dma_stop(rtwdev);
+}
+
+static int rtw_wow_start(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_wow_fw_start(rtwdev);
+ if (ret)
+ goto out;
+
+ rtw_hci_stop(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ rtw_wow_avoid_reset_mac(rtwdev);
+
+out:
+ return ret;
+}
+
+static int rtw_wow_enable(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ rtw_wow_stop_trx(rtwdev);
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap wow fw\n");
+ goto error;
+ }
+
+ set_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download wowlan rsvd page\n");
+ goto error;
+ }
+
+ ret = rtw_wow_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start wow\n");
+ goto error;
+ }
+
+ return ret;
+
+error:
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+ return ret;
+}
+
+static int rtw_wow_stop(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+	/* some HCI-related registers are reset after resume and
+	 * need to be set up again
+	 */
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ return ret;
+ }
+
+ ret = rtw_hci_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start hci\n");
+ return ret;
+ }
+
+ ret = rtw_wow_fw_stop(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to stop wowlan fw\n");
+
+ rtw_wow_bb_stop(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_resume_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_rx_dma_start(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+}
+
+static int rtw_wow_disable(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_stop(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to stop wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_NORMAL_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap normal fw\n");
+ goto out;
+ }
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to download normal rsvd page\n");
+
+out:
+ rtw_wow_resume_trx(rtwdev);
+ return ret;
+}
+
+static void rtw_wow_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+	/* The current wowlan code supports only one STATION vif, so stop
+	 * the iteration once a suitable vif has been found.
+	 */
+ if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ switch (rtwvif->net_type) {
+ case RTW_NET_MGD_LINKED:
+ rtw_wow->wow_vif = vif;
+ break;
+ case RTW_NET_NO_LINK:
+ if (rtw_wow->pno_req.inited)
+			rtw_wow->wow_vif = vif;
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtw_wow_set_wakeups(struct rtw_dev *rtwdev,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_patterns = rtw_wow->patterns;
+ struct rtw_vif *rtwvif;
+ int i;
+
+ if (wowlan->disconnect)
+ set_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
+ if (wowlan->magic_pkt)
+ set_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+ if (wowlan->gtk_rekey_failure)
+ set_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags);
+
+ if (wowlan->nd_config)
+ rtw_wow_check_pno(rtwdev, wowlan->nd_config);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_wow_vif_iter, rtwdev);
+ if (!rtw_wow->wow_vif)
+ return -EPERM;
+
+ rtwvif = (struct rtw_vif *)rtw_wow->wow_vif->drv_priv;
+ if (wowlan->n_patterns && wowlan->patterns) {
+ rtw_wow->pattern_cnt = wowlan->n_patterns;
+ for (i = 0; i < wowlan->n_patterns; i++)
+ rtw_wow_pattern_generate(rtwdev, rtwvif,
+ wowlan->patterns + i,
+ rtw_patterns + i);
+ }
+
+ return 0;
+}
+
+static void rtw_wow_clear_wakeups(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+
+ if (pno_req->inited) {
+ kfree(pno_req->channels);
+ kfree(pno_req->match_sets);
+ }
+
+ memset(rtw_wow, 0, sizeof(rtwdev->wow));
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan)
+{
+ int ret = 0;
+
+ ret = rtw_wow_set_wakeups(rtwdev, wowlan);
+ if (ret) {
+ rtw_err(rtwdev, "failed to set wakeup event\n");
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from normal mode\n");
+ goto out;
+ }
+
+ ret = rtw_wow_enable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to enable wow\n");
+ rtw_wow_restore_ps(rtwdev);
+ goto out;
+ }
+
+ ret = rtw_wow_enter_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to enter ps for wow\n");
+
+out:
+ return ret;
+}
+
+int rtw_wow_resume(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ /* If wowlan mode is not enabled, do nothing */
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ rtw_err(rtwdev, "wow is not enabled\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from wowlan mode\n");
+ goto out;
+ }
+
+ rtw_wow_show_wakeup_reason(rtwdev);
+
+ ret = rtw_wow_disable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to disable wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_restore_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to restore ps to normal mode\n");
+
+out:
+ rtw_wow_clear_wakeups(rtwdev);
+ return ret;
+}
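
For context, the two entry points above are invoked from the driver's mac80211 callbacks. Below is an illustrative sketch of that wiring (not part of this patch); the mutex and the hw->priv layout are assumptions about the surrounding driver, and the ret ? 1 : 0 convention asks mac80211 to fall back to a full reconfiguration on failure.

#include <net/mac80211.h>

static int example_ops_suspend(struct ieee80211_hw *hw,
                               struct cfg80211_wowlan *wowlan)
{
        struct rtw_dev *rtwdev = hw->priv;
        int ret;

        mutex_lock(&rtwdev->mutex);
        ret = rtw_wow_suspend(rtwdev, wowlan);
        mutex_unlock(&rtwdev->mutex);

        return ret ? 1 : 0;
}

static int example_ops_resume(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        int ret;

        mutex_lock(&rtwdev->mutex);
        ret = rtw_wow_resume(rtwdev);
        mutex_unlock(&rtwdev->mutex);

        return ret ? 1 : 0;
}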
diff --git a/drivers/net/wireless/realtek/rtw88/wow.h b/drivers/net/wireless/realtek/rtw88/wow.h
new file mode 100644
index 000000000000..289368a2cba4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_WOW_H__
+#define __RTW_WOW_H__
+
+#define PNO_CHECK_BYTE 4
+
+enum rtw_wow_pattern_type {
+ RTW_PATTERN_BROADCAST = 0,
+ RTW_PATTERN_MULTICAST,
+ RTW_PATTERN_UNICAST,
+ RTW_PATTERN_VALID,
+ RTW_PATTERN_INVALID,
+};
+
+enum rtw_wake_reason {
+ RTW_WOW_RSN_RX_PTK_REKEY = 0x1,
+ RTW_WOW_RSN_RX_GTK_REKEY = 0x2,
+ RTW_WOW_RSN_RX_DEAUTH = 0x8,
+ RTW_WOW_RSN_DISCONNECT = 0x10,
+ RTW_WOW_RSN_RX_MAGIC_PKT = 0x21,
+ RTW_WOW_RSN_RX_PATTERN_MATCH = 0x23,
+ RTW_WOW_RSN_RX_NLO = 0x55,
+};
+
+struct rtw_fw_media_status_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 connect;
+};
+
+struct rtw_fw_key_type_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 group_key_type;
+ u8 pairwise_key_type;
+};
+
+static inline bool rtw_wow_mgd_linked(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_MGD_LINKED);
+}
+
+static inline bool rtw_wow_no_link(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_NO_LINK);
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan);
+int rtw_wow_resume(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f84250bdb8cf..6f8d5f9a9f7e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -622,6 +622,7 @@ static int bl_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp, char *str)
bl_start_cmd_timer(adapter, timeout);
status = bl_write_cmd(adapter, cmd, exp_resp, &regout_val);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: Command %s (%0x) writing failed..\n",
__func__, str, cmd);
@@ -737,10 +738,9 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
}
status = bl_cmd(adapter, cmd_req, cmd_resp, str);
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
return 0;
}
@@ -828,10 +828,9 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
status = bl_cmd(adapter, EOF_REACHED, FW_LOADING_SUCCESSFUL,
"EOF_REACHED");
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
rsi_dbg(INFO_ZONE, "FW loading is done and FW is running..\n");
return 0;
}
@@ -849,6 +848,7 @@ static int rsi_hal_prepare_fwload(struct rsi_hw *adapter)
&regout_val,
RSI_COMMON_REG_SIZE);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: REGOUT read failed\n", __func__);
return status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 53f41fc2cadf..a62d41c0ccbc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/types.h>
#include <net/rsi_91x.h>
#include "rsi_usb.h"
#include "rsi_hal.h"
@@ -29,7 +30,7 @@ MODULE_PARM_DESC(dev_oper_mode,
"9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
"6[AP + BT classic], 14[AP + BT classic + BT LE]");
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num);
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
/**
* rsi_usb_card_write() - This function writes to the USB Card.
@@ -117,7 +118,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
__le16 buffer_size;
int ii, bin_found = 0, bout_found = 0;
- iface_desc = &(interface->altsetting[0]);
+ iface_desc = interface->cur_altsetting;
for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
endpoint = &(iface_desc->endpoint[ii].desc);
@@ -148,9 +149,17 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
break;
}
- if (!(dev->bulkin_endpoint_addr[0]) &&
- dev->bulkout_endpoint_addr[0])
+ if (!(dev->bulkin_endpoint_addr[0] && dev->bulkout_endpoint_addr[0])) {
+ dev_err(&interface->dev, "missing wlan bulk endpoints\n");
return -EINVAL;
+ }
+
+ if (adapter->priv->coex_mode > 1) {
+ if (!dev->bulkin_endpoint_addr[1]) {
+ dev_err(&interface->dev, "missing bt bulk-in endpoint\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
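
Using cur_altsetting instead of altsetting[0] matters because the two can differ once a SET_INTERFACE request has been issued, and endpoint checks must reflect the active setting. The kernel also provides a helper that performs the same existence check on the current altsetting; a hedged sketch with hypothetical names:

#include <linux/usb.h>

/* Hypothetical helper: validate that the active altsetting really has
 * one bulk-in and one bulk-out endpoint before using their addresses.
 */
static int example_find_bulk_eps(struct usb_interface *intf,
                                 u8 *bulk_in, u8 *bulk_out)
{
        struct usb_endpoint_descriptor *in, *out;
        int ret;

        ret = usb_find_common_endpoints(intf->cur_altsetting,
                                        &in, &out, NULL, NULL);
        if (ret) {
                dev_err(&intf->dev, "missing bulk endpoints\n");
                return ret;
        }

        *bulk_in = in->bEndpointAddress;
        *bulk_out = out->bEndpointAddress;
        return 0;
}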
@@ -285,20 +294,29 @@ static void rsi_rx_done_handler(struct urb *urb)
status = 0;
out:
- if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num))
+ if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC))
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);
if (status)
dev_kfree_skb(rx_cb->rx_skb);
}
+static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
+ struct urb *urb = rx_cb->rx_urb;
+
+ usb_kill_urb(urb);
+}
+
/**
* rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
* @adapter: Pointer to the adapter structure.
*
* Return: 0 on success, a negative error code on failure.
*/
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
@@ -328,9 +346,11 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
rsi_rx_done_handler,
rx_cb);
- status = usb_submit_urb(urb, GFP_KERNEL);
- if (status)
+ status = usb_submit_urb(urb, mem_flags);
+ if (status) {
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+ dev_kfree_skb(skb);
+ }
return status;
}
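
Threading a gfp_t through rsi_rx_urb_submit() is what allows the same function to be called both from probe (process context, GFP_KERNEL) and from the URB completion handler (atomic context, GFP_ATOMIC). A minimal illustration, with hypothetical names:

#include <linux/usb.h>

/* Completion handlers run in atomic (often interrupt) context, where a
 * sleeping allocation would be a bug.
 */
static void example_rx_complete(struct urb *urb)
{
        if (usb_submit_urb(urb, GFP_ATOMIC))    /* must not sleep here */
                pr_err("urb resubmission failed\n");
}

static int example_rx_start(struct urb *urb)
{
        /* process context (e.g. probe): a sleeping allocation is fine */
        return usb_submit_urb(urb, GFP_KERNEL);
}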
@@ -816,17 +836,20 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__);
}
- status = rsi_rx_urb_submit(adapter, WLAN_EP);
+ status = rsi_rx_urb_submit(adapter, WLAN_EP, GFP_KERNEL);
if (status)
goto err1;
if (adapter->priv->coex_mode > 1) {
- status = rsi_rx_urb_submit(adapter, BT_EP);
+ status = rsi_rx_urb_submit(adapter, BT_EP, GFP_KERNEL);
if (status)
- goto err1;
+ goto err_kill_wlan_urb;
}
return 0;
+
+err_kill_wlan_urb:
+ rsi_rx_urb_kill(adapter, WLAN_EP);
err1:
rsi_deinit_usb_interface(adapter);
err:
@@ -857,6 +880,10 @@ static void rsi_disconnect(struct usb_interface *pfunction)
adapter->priv->bt_adapter = NULL;
}
+ if (adapter->priv->coex_mode > 1)
+ rsi_rx_urb_kill(adapter, BT_EP);
+ rsi_rx_urb_kill(adapter, WLAN_EP);
+
rsi_reset_card(adapter);
rsi_deinit_usb_interface(adapter);
rsi_91x_deinit(adapter);
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 2dfcdb145944..400dd585916b 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -715,7 +715,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
};
struct ieee80211_sta *sta;
struct wsm_tx *wsm;
- bool tid_update = 0;
+ bool tid_update = false;
u8 flags = 0;
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2a48fc6ad56c..6ef8fc9ae627 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1429,7 +1429,7 @@ out:
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1444,8 +1444,10 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
lid_type = WEP_DEFAULT_LID_TYPE;
else
lid_type = BROADCAST_LID_TYPE;
- } else {
+ } else if (is_pairwise) {
lid_type = UNICAST_LID_TYPE;
+ } else {
+ lid_type = BROADCAST_LID_TYPE;
}
wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 084375ba4abf..bfad7b5a1ac6 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -65,7 +65,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16);
+ u16 tx_seq_16, bool is_pairwise);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e994995d79ab..ed049c9f7e29 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3273,7 +3273,7 @@ out:
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 id, u8 key_type, u8 key_size,
const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -3311,6 +3311,7 @@ static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ap_key->hlid = hlid;
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
+ ap_key->is_pairwise = is_pairwise;
wlvif->ap.recorded_keys[i] = ap_key;
return 0;
@@ -3346,7 +3347,7 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
- key->tx_seq_16);
+ key->tx_seq_16, key->is_pairwise);
if (ret < 0)
goto out;
@@ -3369,7 +3370,8 @@ out:
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
- u16 tx_seq_16, struct ieee80211_sta *sta)
+ u16 tx_seq_16, struct ieee80211_sta *sta,
+ bool is_pairwise)
{
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
@@ -3396,12 +3398,12 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
} else {
ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
}
if (ret < 0)
@@ -3501,6 +3503,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
u16 tx_seq_16 = 0;
u8 key_type;
u8 hlid;
+ bool is_pairwise;
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
@@ -3550,12 +3553,14 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
switch (cmd) {
case SET_KEY:
ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- tx_seq_32, tx_seq_16, sta);
+ tx_seq_32, tx_seq_16, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not add or replace key");
return ret;
@@ -3581,7 +3586,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- 0, 0, sta);
+ 0, 0, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not remove key");
return ret;
@@ -6223,6 +6228,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
@@ -6267,7 +6273,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_IBSS_RSN;
wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 6fab60b0e367..eefae3f867b9 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -212,6 +212,7 @@ struct wl1271_ap_key {
u8 hlid;
u32 tx_seq_32;
u16 tx_seq_16;
+ bool is_pairwise;
};
enum wl12xx_flags {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 007bf6803293..686161db8706 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1285,7 +1285,7 @@ out:
return rc;
}
-static void wl3501_tx_timeout(struct net_device *dev)
+static void wl3501_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_device_stats *stats = &dev->stats;
int rc;
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 0db7362bedb4..41641fc2be74 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -830,7 +830,7 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void zd1201_tx_timeout(struct net_device *dev)
+static void zd1201_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct zd1201 *zd = netdev_priv(dev);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 7b5c2fe5bd4d..8ff0374126e4 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1263,7 +1263,7 @@ static void print_id(struct usb_device *udev)
static int eject_installer(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct usb_host_interface *iface_desc = &intf->altsetting[0];
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 10d580c3dea3..6b7532f7c936 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -51,7 +51,8 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
found = false;
oldest = NULL;
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
/* Make sure we don't add duplicate entries */
if (entry->len == len &&
memcmp(entry->tag, tag, len) == 0)
@@ -102,7 +103,8 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
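
The new fourth argument to list_for_each_entry_rcu() documents the lock that protects the traversal, so lockdep does not complain when the walk happens outside an RCU read-side critical section. A minimal sketch of the pattern, with hypothetical names:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
        struct list_head link;
        struct rcu_head rcu;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

/* Updater-side walk: protected by the spinlock rather than
 * rcu_read_lock(), and the fourth argument tells lockdep exactly that.
 */
static void example_flush(void)
{
        struct example_entry *e;

        spin_lock(&example_lock);
        list_for_each_entry_rcu(e, &example_list, link,
                                lockdep_is_held(&example_lock)) {
                list_del_rcu(&e->link);
                kfree_rcu(e, rcu);
        }
        spin_unlock(&example_lock);
}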
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f15ba3de6195..0c8a02a1ead7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -585,6 +585,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
+ RING_IDX rsp_prod, req_prod;
int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
@@ -593,7 +594,14 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
goto err;
shared = (struct xen_netif_ctrl_sring *)addr;
- BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(shared->rsp_prod);
+ req_prod = READ_ONCE(shared->req_prod);
+
+ BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
+ goto err_unmap;
err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0020b2e8c279..315dfc6ea297 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1453,7 +1453,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
-
+ RING_IDX rsp_prod, req_prod;
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
@@ -1462,7 +1462,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(txs->rsp_prod);
+ req_prod = READ_ONCE(txs->req_prod);
+
+ BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
+ goto err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&rx_ring_ref, 1, &addr);
@@ -1470,7 +1477,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(rxs->rsp_prod);
+ req_prod = READ_ONCE(rxs->req_prod);
+
+ BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
+ goto err;
return 0;
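
The switch from BACK_RING_INIT() to BACK_RING_ATTACH() lets the backend re-attach to a ring whose indices the frontend has already advanced, instead of resetting them; the follow-up check rejects index values an untrusted frontend could have corrupted. The check relies on well-defined unsigned wrap-around, sketched here:

#include <xen/interface/io/ring.h>

/* RING_IDX is a free-running unsigned index, so the unsigned
 * subtraction below is well defined across wrap-around and yields the
 * number of requests the frontend claims are outstanding.
 */
static bool example_ring_indices_sane(RING_IDX req_prod, RING_IDX rsp_prod,
                                      unsigned int ring_size)
{
        return req_prod - rsp_prod <= ring_size;
}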
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f533b7372d59..286054b60d47 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -195,185 +195,6 @@ static void xenvif_debugfs_delif(struct xenvif *vif)
}
#endif /* CONFIG_DEBUG_FS */
-static int netback_remove(struct xenbus_device *dev)
-{
- struct backend_info *be = dev_get_drvdata(&dev->dev);
-
- set_backend_state(be, XenbusStateClosed);
-
- unregister_hotplug_status_watch(be);
- if (be->vif) {
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
- xen_unregister_watchers(be->vif);
- xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_free(be->vif);
- be->vif = NULL;
- }
- kfree(be->hotplug_script);
- kfree(be);
- dev_set_drvdata(&dev->dev, NULL);
- return 0;
-}
-
-
-/**
- * Entry point to this code when a new device is created. Allocate the basic
- * structures and switch to InitWait.
- */
-static int netback_probe(struct xenbus_device *dev,
- const struct xenbus_device_id *id)
-{
- const char *message;
- struct xenbus_transaction xbt;
- int err;
- int sg;
- const char *script;
- struct backend_info *be = kzalloc(sizeof(struct backend_info),
- GFP_KERNEL);
- if (!be) {
- xenbus_dev_fatal(dev, -ENOMEM,
- "allocating backend structure");
- return -ENOMEM;
- }
-
- be->dev = dev;
- dev_set_drvdata(&dev->dev, be);
-
- be->state = XenbusStateInitialising;
- err = xenbus_switch_state(dev, XenbusStateInitialising);
- if (err)
- goto fail;
-
- sg = 1;
-
- do {
- err = xenbus_transaction_start(&xbt);
- if (err) {
- xenbus_dev_fatal(dev, err, "starting transaction");
- goto fail;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
- if (err) {
- message = "writing feature-sg";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv4";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv6";
- goto abort_transaction;
- }
-
- /* We support partial checksum setup for IPv6 packets */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-ipv6-csum-offload",
- "%d", 1);
- if (err) {
- message = "writing feature-ipv6-csum-offload";
- goto abort_transaction;
- }
-
- /* We support rx-copy path. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-copy", "%d", 1);
- if (err) {
- message = "writing feature-rx-copy";
- goto abort_transaction;
- }
-
- /*
- * We don't support rx-flip path (except old guests who don't
- * grok this feature flag).
- */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-flip", "%d", 0);
- if (err) {
- message = "writing feature-rx-flip";
- goto abort_transaction;
- }
-
- /* We support dynamic multicast-control. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-multicast-control", "%d", 1);
- if (err) {
- message = "writing feature-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename,
- "feature-dynamic-multicast-control",
- "%d", 1);
- if (err) {
- message = "writing feature-dynamic-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_transaction_end(xbt, 0);
- } while (err == -EAGAIN);
-
- if (err) {
- xenbus_dev_fatal(dev, err, "completing transaction");
- goto fail;
- }
-
- /*
- * Split event channels support, this is optional so it is not
- * put inside the above loop.
- */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-split-event-channels",
- "%u", separate_tx_rx_irq);
- if (err)
- pr_debug("Error writing feature-split-event-channels\n");
-
- /* Multi-queue support: This is an optional feature. */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "multi-queue-max-queues", "%u", xenvif_max_queues);
- if (err)
- pr_debug("Error writing multi-queue-max-queues\n");
-
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-ctrl-ring",
- "%u", true);
- if (err)
- pr_debug("Error writing feature-ctrl-ring\n");
-
- script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
- if (IS_ERR(script)) {
- err = PTR_ERR(script);
- xenbus_dev_fatal(dev, err, "reading script");
- goto fail;
- }
-
- be->hotplug_script = script;
-
-
- /* This kicks hotplug scripts, so do it immediately. */
- err = backend_create_xenvif(be);
- if (err)
- goto fail;
-
- return 0;
-
-abort_transaction:
- xenbus_transaction_end(xbt, 1);
- xenbus_dev_fatal(dev, err, "%s", message);
-fail:
- pr_debug("failed\n");
- netback_remove(dev);
- return err;
-}
-
-
/*
* Handle the creation of the hotplug script environment. We add the script
* and vif variables to the environment, for the benefit of the vif-* hotplug
@@ -827,6 +648,7 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -1128,6 +950,174 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return 0;
}
+static int netback_remove(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ backend_disconnect(be);
+ xenvif_free(be->vif);
+ be->vif = NULL;
+ }
+ kfree(be->hotplug_script);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and switch to InitWait.
+ */
+static int netback_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+ int sg;
+ const char *script;
+ struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
+
+ if (!be) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "allocating backend structure");
+ return -ENOMEM;
+ }
+
+ be->dev = dev;
+ dev_set_drvdata(&dev->dev, be);
+
+ sg = 1;
+
+ do {
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
+ /* We support rx-copy path. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-copy", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-copy";
+ goto abort_transaction;
+ }
+
+ /* We don't support rx-flip path (except old guests who
+ * don't grok this feature flag).
+ */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-flip", "%d", 0);
+ if (err) {
+ message = "writing feature-rx-flip";
+ goto abort_transaction;
+ }
+
+ /* We support dynamic multicast-control. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-multicast-control", "%d", 1);
+ if (err) {
+ message = "writing feature-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-dynamic-multicast-control",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-dynamic-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ } while (err == -EAGAIN);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto fail;
+ }
+
+ /* Split event channels support; this is optional, so it is
+ * not put inside the above loop.
+ */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-split-event-channels",
+ "%u", separate_tx_rx_irq);
+ if (err)
+ pr_debug("Error writing feature-split-event-channels\n");
+
+ /* Multi-queue support: This is an optional feature. */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "multi-queue-max-queues", "%u", xenvif_max_queues);
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-ctrl-ring",
+ "%u", true);
+ if (err)
+ pr_debug("Error writing feature-ctrl-ring\n");
+
+ backend_switch_state(be, XenbusStateInitWait);
+
+ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+ if (IS_ERR(script)) {
+ err = PTR_ERR(script);
+ xenbus_dev_fatal(dev, err, "reading script");
+ goto fail;
+ }
+
+ be->hotplug_script = script;
+
+ /* This kicks hotplug scripts, so do it immediately. */
+ err = backend_create_xenvif(be);
+ if (err)
+ goto fail;
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+fail:
+ pr_debug("failed\n");
+ netback_remove(dev);
+ return err;
+}
+
static const struct xenbus_device_id netback_ids[] = {
{ "vif" },
{ "" }
@@ -1139,6 +1129,7 @@ static struct xenbus_driver netback_driver = {
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
+ .allow_rebind = true,
};
int xenvif_xenbus_init(void)
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 7507176cca0a..0207e66cee21 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -274,7 +274,6 @@ MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
static struct i2c_driver pn533_i2c_driver = {
.driver = {
.name = PN533_I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pn533_i2c_match),
},
.probe = pn533_i2c_probe,
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 4590fbf82dc2..f5bb7ace2ff5 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
cmd, sizeof(cmd), false);
rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
- &transferred, 0);
+ &transferred, 5000);
kfree(buffer);
if (rc || (transferred != sizeof(cmd))) {
nfc_err(&phy->udev->dev,
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index cda996f6954e..2b83156efe3f 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -693,7 +693,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
target->nfcid1_len != 10)
return -EOPNOTSUPP;
- return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
PN544_RF_READER_CMD_ACTIVATE_NEXT,
target->nfcid1, target->nfcid1_len, NULL);
} else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK |
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 604dba4f18af..8e4d355dc3ae 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
{
struct port100_frame *frame = _frame;
- frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+ le16_add_cpu(&frame->datalen, len);
}
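
le16_add_cpu() is shorthand for the conversion round-trip it replaces; the two forms below are equivalent (names hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_add_open_coded(__le16 *field, u16 len)
{
        *field = cpu_to_le16(le16_to_cpu(*field) + len);
}

static void example_add_helper(__le16 *field, u16 len)
{
        le16_add_cpu(field, len);       /* same effect as above */
}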
static bool port100_rx_frame_is_valid(void *_frame)
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c6439638a419..b9358db83e96 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fc757ef6eadc..f5c2a5487761 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -42,14 +42,37 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
+static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
+{
+ struct of_phandle_args arg;
+ int err;
+
+ err = of_parse_phandle_with_fixed_args(node, "timestamper", 1, 0, &arg);
+
+ if (err == -ENOENT)
+ return NULL;
+ else if (err)
+ return ERR_PTR(err);
+
+ if (arg.args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ return register_mii_timestamper(arg.np, arg.args[0]);
+}
+
static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
+ struct mii_timestamper *mii_ts;
struct phy_device *phy;
bool is_c45;
int rc;
u32 phy_id;
+ mii_ts = of_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
is_c45 = of_device_is_compatible(child,
"ethernet-phy-ieee802.3-c45");
@@ -57,11 +80,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
else
phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ unregister_mii_timestamper(mii_ts);
return PTR_ERR(phy);
+ }
rc = of_irq_get(child, 0);
if (rc == -EPROBE_DEFER) {
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
return rc;
}
@@ -90,10 +116,12 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
of_node_put(child);
return rc;
}
+ phy->mii_ts = mii_ts;
dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
child, addr);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index be7a7d332332..ba43e6a3dc0a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
- kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
@@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_unlock(&opp_table_lock);
}
-void _opp_remove_all_static(struct opp_table *opp_table)
-{
- struct dev_pm_opp *opp, *tmp;
-
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- opp_table->parsed_static_opps = false;
-}
-
-static void _opp_table_list_kref_release(struct kref *kref)
-{
- struct opp_table *opp_table = container_of(kref, struct opp_table,
- list_kref);
-
- _opp_remove_all_static(opp_table);
- mutex_unlock(&opp_table_lock);
-}
-
-void _put_opp_list_kref(struct opp_table *opp_table)
-{
- kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
- &opp_table_lock);
-}
-
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ mutex_lock(&opp_table->lock);
+
+ if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ goto unlock;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put_unlocked(opp);
+ }
+
+unlock:
+ mutex_unlock(&opp_table->lock);
+}
+
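The series replaces the dedicated list_kref with a plain counter guarded by the table lock: parsed_static_opps now counts the devices that initialized static OPPs, and the function above frees them only when that count drops to zero. A reduced sketch of the scheme, with hypothetical names:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_table {
        struct mutex lock;
        unsigned int users;     /* devices that parsed static entries */
};

static void example_get(struct example_table *t)
{
        mutex_lock(&t->lock);
        t->users++;
        mutex_unlock(&t->lock);
}

static bool example_put(struct example_table *t)
{
        bool last = false;

        mutex_lock(&t->lock);
        if (t->users && --t->users == 0)
                last = true;    /* caller now tears down static entries */
        mutex_unlock(&t->lock);

        return last;
}
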
/**
* dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
* @dev: device for which we do this operation
@@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
return;
}
- _put_opp_list_kref(opp_table);
+ _opp_remove_all_static(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1cbb58240b80..9cd8f0adacae 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
+ mutex_lock(&opp_table->lock);
if (opp_table->parsed_static_opps) {
- kref_get(&opp_table->list_kref);
+ opp_table->parsed_static_opps++;
+ mutex_unlock(&opp_table->lock);
return 0;
}
- /*
- * Re-initialize list_kref every time we add static OPPs to the OPP
- * table as the reference count may be 0 after the last tie static OPPs
- * were removed.
- */
- kref_init(&opp_table->list_kref);
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
@@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
of_node_put(np);
- return ret;
+ goto remove_static_opp;
} else if (opp) {
count++;
}
}
/* There should be one of more OPP defined */
- if (WARN_ON(!count))
- return -ENOENT;
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto remove_static_opp;
+ }
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
@@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- return -ENOENT;
+ ret = -ENOENT;
+ goto remove_static_opp;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->parsed_static_opps = true;
-
return 0;
+
+remove_static_opp:
+ _opp_remove_all_static(opp_table);
+
+ return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
@@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
+ _opp_remove_all_static(opp_table);
return ret;
}
nr -= 2;
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 01a500e2c40a..d14e27102730 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -127,11 +127,10 @@ enum opp_table_access {
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @list_kref: for reference count of the OPP list.
* @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @parsed_static_opps: True if OPPs are initialized from DT.
+ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -167,7 +166,6 @@ struct opp_table {
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
- struct kref list_kref;
struct mutex lock;
struct device_node *np;
@@ -176,7 +174,7 @@ struct opp_table {
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool parsed_static_opps;
+ unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 1c69c404df11..e3357e91decb 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev,
goto out_map;
}
- base = ioremap_nocache(res->start, resource_size(res));
+ base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(dev, "Unable to map Efuse registers\n");
ret = -ENOMEM;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index ad290f79983b..a5507f75b524 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev)
*ioc_p = ioc;
ioc->hw_path = dev->hw_path;
- ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
+ ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
if (!ioc->ioc_regs) {
kfree(ioc);
return -ENOMEM;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 2f1cac89ddf5..889d7ce282eb 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev)
}
dino_dev->hba.dev = dev;
- dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+ dino_dev->hba.base_addr = ioremap(hpa, 4096);
dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 37a2c5db761d..9d00a24277aa 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev)
eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
}
}
- eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
+ eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
if (!eisa_eeprom_addr) {
result = -ENOMEM;
- printk(KERN_ERR "EISA: ioremap_nocache failed!\n");
+ printk(KERN_ERR "EISA: ioremap failed!\n");
goto error_free_irq;
}
result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 32f506f00c89..8a3b0c3a1e92 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa)
return NULL;
}
- isi->addr = ioremap_nocache(hpa, 4096);
+ isi->addr = ioremap(hpa, 4096);
isi->isi_hpa = hpa;
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a99e385c68bd..732b516c7bf8 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
** Postable I/O port space is per PCI host adapter.
** base of 64MB PIOP region
*/
- lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
+ lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
(int)lba_dev->hba.bus_num.start);
@@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *addr = ioremap(dev->hpa.start, 4096);
int max;
/* Read HW Rev First */
@@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev)
} else {
if (!astro_iop_base) {
/* Sprockets PDC uses NPIOP region */
- astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
+ astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
pci_port = &lba_astro_port_ops;
}
@@ -1693,7 +1693,7 @@ void __init lba_init(void)
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
- void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
+ void __iomem * base_addr = ioremap(lba->hpa.start, 4096);
imask <<= 2; /* adjust for hints - 2 more bits */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index de8e4e347249..7e112829d250 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
- return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+ return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
@@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
u32 func_class;
int i;
char *version;
- void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
+ void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *root;
#endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index b20651cea09f..9bf7fa99b103 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ base = devm_ioremap(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 3dd2e2697294..cfeccd7e9fff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
- msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+ msix_tbl = ioremap(ep->phys_base + tbl_addr,
PCI_MSIX_ENTRY_SIZE);
if (!msix_tbl)
return -EINVAL;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index c7709e49f0e4..6b43a5455c7a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
- return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+ return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e87196cc1a7f..df21e3227b57 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -184,7 +184,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap_nocache(res->start, resource_size(res));
+ return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
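
This hunk and the following ones are part of a tree-wide mechanical rename: ioremap_nocache() had long been an alias for ioremap() (both return an uncached mapping), so behavior is unchanged. A minimal sketch of the resulting idiom, with a hypothetical helper name:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical helper: map BAR 0 uncached.  Plain ioremap() already
 * returns an uncached mapping, which is all the old alias provided.
 */
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
        return ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
}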
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4937a088d7d8..a3a1a0ea64f4 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
- asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
+ asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
@@ -4784,7 +4784,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
if (!(rcba & INTEL_LPC_RCBA_ENABLE))
return -EINVAL;
- rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
PAGE_ALIGN(INTEL_UPDCR_REG));
if (!rcba_mem)
return -ENOMEM;
@@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
/*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
*/
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- pci_info(pdev, "disabling ATS (broken on this device)\n");
+ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 55083c67b2bb..95dca2cb5265 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
- goto ddr_perf_err;
+ goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
- cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ goto cpuhp_instance_err;
+ }
/* Request irq */
irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
return 0;
ddr_perf_err:
- if (pmu->cpuhp_state)
- cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
ida_simple_remove(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
irq_set_affinity_hint(pmu->irq, NULL);
perf_pmu_unregister(&pmu->pmu);
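
The fix pairs each cpuhp setup step with its matching teardown: cpuhp_state_add_instance_nocalls() is now checked, and its failure unwinds the dynamically allocated state with cpuhp_remove_multi_state(). A minimal sketch of that pairing, with hypothetical names:

#include <linux/cpuhotplug.h>

/* Hypothetical setup path: every step that succeeded is unwound if a
 * later step fails, mirroring the fix above.
 */
static int example_setup(struct hlist_node *node)
{
        int state, ret;

        state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                        "example/pmu:online", NULL, NULL);
        if (state < 0)
                return state;

        ret = cpuhp_state_add_instance_nocalls(state, node);
        if (ret) {
                cpuhp_remove_multi_state(state);
                return ret;
        }

        return state;
}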
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 96183e31b96a..584de8f807cc 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
+
/*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ * SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ * SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ * SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr();
-
- if (mpidr & MPIDR_MT_BITMASK) {
- if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
- } else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- }
+ int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ bool mt = mpidr & MPIDR_MT_BITMASK;
+ int sccl, ccl;
+
+ if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ sccl = aff2 >> 3;
+ ccl = aff2 & 0x7;
+ } else if (mt) {
+ sccl = aff3;
+ ccl = aff2;
} else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ sccl = aff2;
+ ccl = aff1;
}
+
+ if (scclp)
+ *scclp = sccl;
+ if (cclp)
+ *cclp = ccl;
}
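
The rewrite flattens the nested conditionals into a single decode of the three affinity fields. For reference, MPIDR_AFFINITY_LEVEL() is just a shift-and-mask; a simplified restatement of the arm64 macros:

#include <linux/types.h>

/* Each affinity field is 8 bits wide, at bit offsets 0, 8, 16 and 32
 * for Aff0..Aff3 respectively (note Aff3 lives above bit 31).
 */
#define EXAMPLE_MPIDR_LEVEL_SHIFT(level)  ((((1) << (level)) >> 1) << 3)

static inline unsigned int example_affinity_level(u64 mpidr, int level)
{
        return (mpidr >> EXAMPLE_MPIDR_LEVEL_SHIFT(level)) & 0xff;
}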
/*
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2ac874..b3ed94b98d9b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -69,5 +69,6 @@ source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
+source "drivers/phy/intel/Kconfig"
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c96a1afc95bd..310c149a9df5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,6 +18,7 @@ obj-y += broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
+ intel/ \
lantiq/ \
marvell/ \
motorola/ \
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 3dab79e9d52b..e760d89d3976 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -48,7 +48,8 @@ config PHY_SUN9I_USB
config PHY_SUN50I_USB3
tristate "Allwinner H6 SoC USB3 PHY driver"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM && OF
depends on RESET_CONTROLLER
select GENERIC_PHY
help
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index d3d983c128ea..b29f11c19155 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -50,7 +50,7 @@ config PHY_BCM_NS_USB3
config PHY_NS2_PCIE
tristate "Broadcom Northstar2 PCIe PHY driver"
- depends on OF && MDIO_BUS_MUX_BCM_IPROC
+ depends on (OF && MDIO_BUS_MUX_BCM_IPROC) || (COMPILE_TEST && MDIO_BUS)
select GENERIC_PHY
default ARCH_BCM_IPROC
help
@@ -83,7 +83,7 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index f453c7d3ffff..c78de546135c 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PHY_NS2_USB_DRD) += phy-bcm-ns2-usbdrd.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
-phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o phy-brcm-usb-init-synopsys.o
obj-$(CONFIG_PHY_BCM_SR_PCIE) += phy-bcm-sr-pcie.o
obj-$(CONFIG_PHY_BCM_SR_USB) += phy-bcm-sr-usb.o
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 50ac75bbb0c9..4710cfcc3037 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -33,6 +33,7 @@
#define SATA_PHY_CTRL_REG_28NM_SPACE_SIZE 0x8
enum brcm_sata_phy_version {
+ BRCM_SATA_PHY_STB_16NM,
BRCM_SATA_PHY_STB_28NM,
BRCM_SATA_PHY_STB_40NM,
BRCM_SATA_PHY_IPROC_NS2,
@@ -104,10 +105,13 @@ enum sata_phy_regs {
PLL1_ACTRL5 = 0x85,
PLL1_ACTRL6 = 0x86,
PLL1_ACTRL7 = 0x87,
+ PLL1_ACTRL8 = 0x88,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
TX_ACTRL0_TXPOL_FLIP = BIT(6),
+ TX_ACTRL5 = 0x85,
+ TX_ACTRL5_SSC_EN = BIT(11),
AEQRX_REG_BANK_0 = 0xd0,
AEQ_CONTROL1 = 0x81,
@@ -116,6 +120,7 @@ enum sata_phy_regs {
AEQ_FRC_EQ = 0x83,
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
+ AEQ_RFZ_FRC_VAL = BIT(8),
AEQRX_REG_BANK_1 = 0xe0,
AEQRX_SLCAL0_CTRL0 = 0x82,
AEQRX_SLCAL1_CTRL0 = 0x86,
@@ -152,7 +157,28 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_CDR_CONTROL1 = 0x81,
+ RXPMD_RX_PPM_VAL_MASK = 0x1ff,
+ RXPMD_RXPMD_EN_FRC = BIT(12),
+ RXPMD_RXPMD_EN_FRC_VAL = BIT(13),
+ RXPMD_RX_CDR_CDR_PROP_BW = 0x82,
+ RXPMD_G_CDR_PROP_BW_MASK = 0x7,
+ RXPMD_G1_CDR_PROP_BW_SHIFT = 0,
+ RXPMD_G2_CDR_PROP_BW_SHIFT = 3,
+ RXPMD_G3_CDR_PROB_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_ACQ_INTEG_BW = 0x83,
+ RXPMD_G_CDR_ACQ_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_ACQ_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_ACQ_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_ACQ_INT_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_LOCK_INTEG_BW = 0x84,
+ RXPMD_G_CDR_LOCK_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_LOCK_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_LOCK_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_LOCK_INT_BW_SHIFT = 6,
RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
+ RXPMD_MON_CORRECT_EN = BIT(8),
+ RXPMD_MON_MARGIN_VAL_MASK = 0xff,
};
enum sata_phy_ctrl_regs {
@@ -166,6 +192,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
u32 size = 0;
switch (priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
case BRCM_SATA_PHY_DSL_28NM:
@@ -287,6 +314,94 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
return brcm_stb_sata_rxaeq_init(port);
}
+static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp, value;
+
+ /* Reduce CP tail current to 1/16th of its default value */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+
+ /* Turn off CP tail current boost */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+
+ /* Set a specific AEQ equalizer value */
+ tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+ ~(tmp | AEQ_RFZ_FRC_VAL |
+ AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
+ tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
+
+ /* Set RX PPM val center frequency */
+ if (port->ssc_en)
+ value = 0x52;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+ ~RXPMD_RX_PPM_VAL_MASK, value);
+
+ /* Set proportional loop bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ if (port->ssc_en)
+ value = 2 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ else
+ value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+ value);
+
+ /* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+ ~tmp, value);
+
+ /* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
+ tmp = RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+ ~tmp, value);
+
+ /* Set no guard band and clamp CDR */
+ tmp = RXPMD_MON_CORRECT_EN | RXPMD_MON_MARGIN_VAL_MASK;
+ if (port->ssc_en)
+ value = 0x51;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, RXPMD_MON_CORRECT_EN | value);
+
+ /* Turn on/off SSC */
+ brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+ port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
+
+ return 0;
+}
+
+static int brcm_stb_sata_16nm_init(struct brcm_sata_port *port)
+{
+ return brcm_stb_sata_16nm_ssc_init(port);
+}
+
/* NS2 SATA PLL1 defaults were characterized by H/W group */
#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
@@ -544,6 +659,9 @@ static int brcm_sata_phy_init(struct phy *phy)
struct brcm_sata_port *port = phy_get_drvdata(phy);
switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
+ rc = brcm_stb_sata_16nm_init(port);
+ break;
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_STB_40NM:
rc = brcm_stb_sata_init(port);
@@ -601,6 +719,8 @@ static const struct phy_ops phy_ops = {
};
static const struct of_device_id brcm_sata_phy_of_match[] = {
+ { .compatible = "brcm,bcm7216-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_STB_16NM },
{ .compatible = "brcm,bcm7445-sata-phy",
.data = (void *)BRCM_SATA_PHY_STB_28NM },
{ .compatible = "brcm,bcm7425-sata-phy",
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
new file mode 100644
index 000000000000..456dc4a100c2
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Broadcom */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ * for newer Synopsys based USB hardware first used on the bcm7216.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_LOCK_TIMEOUT_MS 200
+
+/* Register definitions for syscon piarbctl registers */
+#define PIARBCTL_CAM 0x00
+#define PIARBCTL_SPLITTER 0x04
+#define PIARBCTL_MISC 0x08
+#define PIARBCTL_MISC_SECURE_MASK 0x80000000
+#define PIARBCTL_MISC_USB_SELECT_MASK 0x40000000
+#define PIARBCTL_MISC_USB_4G_SDRAM_MASK 0x20000000
+#define PIARBCTL_MISC_USB_PRIORITY_MASK 0x000f0000
+#define PIARBCTL_MISC_USB_MEM_PAGE_MASK 0x0000f000
+#define PIARBCTL_MISC_CAM1_MEM_PAGE_MASK 0x00000f00
+#define PIARBCTL_MISC_CAM0_MEM_PAGE_MASK 0x000000f0
+#define PIARBCTL_MISC_SATA_PRIORITY_MASK 0x0000000f
+
+#define PIARBCTL_MISC_USB_ONLY_MASK \
+ (PIARBCTL_MISC_USB_SELECT_MASK | \
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK | \
+ PIARBCTL_MISC_USB_PRIORITY_MASK | \
+ PIARBCTL_MISC_USB_MEM_PAGE_MASK)
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP 0x00
+#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000
+#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000
+#define USB_CTRL_SETUP_tca_drv_sel_MASK 0x01000000
+#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000
+#define USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK 0x00000200
+#define USB_CTRL_SETUP_IPP_MASK 0x00000020
+#define USB_CTRL_SETUP_IOC_MASK 0x00000010
+#define USB_CTRL_USB_PM 0x04
+#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000
+#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000
+#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000
+#define USB_CTRL_USB_PM_STATUS 0x08
+#define USB_CTRL_USB_DEVICE_CTL1 0x10
+#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003
+#define USB_CTRL_TEST_PORT_CTL 0x30
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK 0x000000ff
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_PME_GEN_MASK 0x0000002e
+#define USB_CTRL_TP_DIAG1 0x34
+#define USB_CTLR_TP_DIAG1_wake_MASK 0x00000002
+#define USB_CTRL_CTLR_CSHCR 0x50
+#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK 0x00040000
+
+/* Register definitions for the USB_PHY block in 7211b0 */
+#define USB_PHY_PLL_CTL 0x00
+#define USB_PHY_PLL_CTL_PLL_RESETB_MASK 0x40000000
+#define USB_PHY_PLL_LDO_CTL 0x08
+#define USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK 0x00000004
+#define USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK 0x00000002
+#define USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK 0x00000001
+#define USB_PHY_UTMI_CTL_1 0x04
+#define USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_MASK 0x0000000c
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT 2
+#define USB_PHY_IDDQ 0x1c
+#define USB_PHY_IDDQ_phy_iddq_MASK 0x00000001
+#define USB_PHY_STATUS 0x20
+#define USB_PHY_STATUS_pll_lock_MASK 0x00000001
+
+/* Register definitions for the MDIO registers in the DWC2 block of
+ * the 7211b0.
+ * NOTE: The PHY's MDIO registers are only accessible through the
+ * legacy DesignWare USB controller even though it's not being used.
+ */
+#define USB_GMDIOCSR 0
+#define USB_GMDIOGEN 4
+
+/* Register definitions for the BDC EC block in 7211b0 */
+#define BDC_EC_AXIRDA 0x0c
+#define BDC_EC_AXIRDA_RTS_MASK 0xf0000000
+#define BDC_EC_AXIRDA_RTS_SHIFT 28
+
+static void usb_mdio_write_7211b0(struct brcm_usb_init_params *params,
+ uint8_t addr, uint16_t data)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
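+ /* the polling loops below spin while GMDIOCSR bit 31 (busy) is set */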
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x59020000 | (addr << 18) | data,
+ usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+}
+
+static uint16_t __maybe_unused usb_mdio_read_7211b0(
+ struct brcm_usb_init_params *params, uint8_t addr)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x69020000 | (addr << 18), usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ return brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & 0xffff;
+}
+
+static void usb2_eye_fix_7211b0(struct brcm_usb_init_params *params)
+{
+ /* select bank */
+ usb_mdio_write_7211b0(params, 0x1f, 0x80a0);
+
+ /* Set the eye */
+ usb_mdio_write_7211b0(params, 0x0a, 0xc6a0);
+}
+
+static void xhci_soft_reset(struct brcm_usb_init_params *params,
+ int on_off)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ /* Assert reset */
+ if (on_off)
+ USB_CTRL_UNSET(ctrl, USB_PM, XHC_SOFT_RESETB);
+ /* De-assert reset */
+ else
+ USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
+}
+
+static void usb_init_ipp(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+ u32 orig_reg;
+
+ pr_debug("%s\n", __func__);
+
+ orig_reg = reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
+ if (params->ipp != 2)
+ /* override ipp strap pin (if it exists) */
+ reg &= ~(USB_CTRL_MASK(SETUP, STRAP_IPP_SEL));
+
+ /* Override the default OC and PP polarity */
+ reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+ if (params->ioc)
+ reg |= USB_CTRL_MASK(SETUP, IOC);
+ if (params->ipp == 1)
+ reg |= USB_CTRL_MASK(SETUP, IPP);
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ /*
+ * If we're changing IPP, make sure power is off long enough
+ * to turn off any connected devices.
+ */
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
+ msleep(50);
+}
+
+static void syscon_piarbctl_init(struct regmap *rmap)
+{
+ /* Switch from legacy USB OTG controller to new STB USB controller */
+ regmap_update_bits(rmap, PIARBCTL_MISC, PIARBCTL_MISC_USB_ONLY_MASK,
+ PIARBCTL_MISC_USB_SELECT_MASK |
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK);
+}
+
+static void usb_init_common(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+
+ if (USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= params->mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+ switch (params->mode) {
+ case USB_CTLR_MODE_HOST:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ USB_CTRL_SET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+}
+
+static void usb_wake_enable_7211b0(struct brcm_usb_init_params *params,
+ bool enable)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ if (enable)
+ USB_CTRL_SET(ctrl, CTLR_CSHCR, ctl_pme_en);
+ else
+ USB_CTRL_UNSET(ctrl, CTLR_CSHCR, ctl_pme_en);
+}
+
+static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ void __iomem *bdc_ec = params->regs[BRCM_REGS_BDC_EC];
+ int timeout_ms = PHY_LOCK_TIMEOUT_MS;
+ u32 reg;
+
+ if (params->syscon_piarbctl)
+ syscon_piarbctl_init(params->syscon_piarbctl);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+
+ usb_wake_enable_7211b0(params, false);
+ if (!params->wake_enabled) {
+ /* undo possible suspend settings */
+ brcm_usb_writel(0, usb_phy + USB_PHY_IDDQ);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg |= USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+
+ /* temporarily enable FSM so PHY comes up properly */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg |= USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+ }
+
+ /* Init the PHY */
+ reg = USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_LDO_CTL);
+
+ /* wait for PLL lock, but proceed anyway after PHY_LOCK_TIMEOUT_MS */
+ while (timeout_ms-- > 0) {
+ reg = brcm_usb_readl(usb_phy + USB_PHY_STATUS);
+ if (reg & USB_PHY_STATUS_pll_lock_MASK)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Set the PHY_MODE */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_PHY_MODE_MASK;
+ reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ /* Fix the incorrect default: tca_drv_sel must be cleared */
+ reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+ reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+ brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+
+ usb_init_common(params);
+
+ /*
+ * The BDC controller will get occasional failures with
+ * the default "Read Transaction Size" of 6 (1024 bytes).
+ * Set it to 4 (256 bytes).
+ */
+ if ((params->mode != USB_CTLR_MODE_HOST) && bdc_ec) {
+ reg = brcm_usb_readl(bdc_ec + BDC_EC_AXIRDA);
+ reg &= ~BDC_EC_AXIRDA_RTS_MASK;
+ reg |= (0x4 << BDC_EC_AXIRDA_RTS_SHIFT);
+ brcm_usb_writel(reg, bdc_ec + BDC_EC_AXIRDA);
+ }
+
+ /*
+ * Disable FSM, otherwise the PHY will auto suspend when no
+ * device is connected and will be reset on resume.
+ */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ usb2_eye_fix_7211b0(params);
+}
+
+static void usb_init_xhci(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ xhci_soft_reset(params, 0);
+}
+
+static void usb_uninit_common(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+}
+
+static void usb_uninit_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (params->wake_enabled) {
+ USB_CTRL_SET(ctrl, TEST_PORT_CTL, TPOUT_SEL_PME_GEN);
+ usb_wake_enable_7211b0(params, true);
+ } else {
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ brcm_usb_writel(0, usb_phy + USB_PHY_PLL_LDO_CTL);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg &= ~USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+ brcm_usb_writel(USB_PHY_IDDQ_phy_iddq_MASK,
+ usb_phy + USB_PHY_IDDQ);
+ }
+}
+
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ if (!params->wake_enabled)
+ xhci_soft_reset(params, 1);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ return reg;
+}
+
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+}
+
+static const struct brcm_usb_init_ops bcm7216_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+static const struct brcm_usb_init_ops bcm7211b0_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common_7211b0,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common_7211b0,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7216";
+ params->ops = &bcm7216_ops;
+}
+
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7211";
+ params->ops = &bcm7211b0_ops;
+ params->suspend_with_clocks = true;
+}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 91b5b09589d6..9391ab42a12b 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -42,6 +42,7 @@
#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
#define USB_CTRL_EBRIDGE 0x0c
#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK 0x00000f80 /* option */
#define USB_CTRL_OBRIDGE 0x10
#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000
#define USB_CTRL_MDIO 0x14
@@ -57,6 +58,8 @@
#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */
+#define USB_CTRL_USB_PM_RMTWKUP_EN_MASK 0x00000001
+#define USB_CTRL_USB_PM_STATUS 0x38
#define USB_CTRL_USB30_CTL1 0x60
#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010
#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000
@@ -126,10 +129,6 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
-#define USB_CTRL_MASK(reg, field) \
- USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
(params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
@@ -140,13 +139,6 @@ enum {
usb_ctrl_unset_family(params, USB_CTRL_##reg, \
USB_CTRL_##reg##_##field##_SELECTOR)
-#define USB_CTRL_SET(base, reg, field) \
- usb_ctrl_set(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-#define USB_CTRL_UNSET(base, reg, field) \
- usb_ctrl_unset(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-
#define MDIO_USB2 0
#define MDIO_USB3 BIT(31)
@@ -176,6 +168,7 @@ static const struct id_to_type id_to_type_table[] = {
{ 0x33900000, BRCM_FAMILY_3390A0 },
{ 0x72500010, BRCM_FAMILY_7250B0 },
+ { 0x72550000, BRCM_FAMILY_7260A0 },
{ 0x72600000, BRCM_FAMILY_7260A0 },
{ 0x72680000, BRCM_FAMILY_7271A0 },
{ 0x72710000, BRCM_FAMILY_7271A0 },
{ 0x73640000, BRCM_FAMILY_7364A0 },
@@ -401,26 +394,14 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
},
};
-static inline u32 brcmusb_readl(void __iomem *addr)
-{
- return readl(addr);
-}
-
-static inline void brcmusb_writel(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
static inline
void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+ brcm_usb_ctrl_unset(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
static inline
@@ -428,45 +409,27 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+ brcm_usb_ctrl_set(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
-static inline void usb_ctrl_set(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value | field, reg);
-}
-
-static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value & ~field, reg);
-}
-
static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
{
u32 data;
data = (reg << 16) | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 24);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 24);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+ return brcm_usb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
}
static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
@@ -475,14 +438,14 @@ static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
u32 data;
data = (reg << 16) | val | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 25);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 25);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
}
@@ -581,7 +544,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
{
u32 ofs;
int ii;
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
/*
* On newer B53 based SoC's, the reference clock for the
@@ -662,7 +625,7 @@ static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
brcmusb_usb3_pll_fix(ctrl_base);
brcmusb_usb3_pll_54mhz(params);
@@ -704,21 +667,21 @@ static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
{
- void __iomem *xhci_ec_base = params->xhci_ec_regs;
+ void __iomem *xhci_ec_base = params->regs[BRCM_REGS_XHCI_EC];
u32 val;
if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
- brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
- val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+ val = brcm_usb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* set cfg_pick_ss_lock */
val |= (1 << 27);
- brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* Reset USB 3.0 PHY for workaround to take effect */
- USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
- USB_CTRL_SET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_UNSET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
}
static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
@@ -747,7 +710,7 @@ static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
* - default chip/rev.
* NOTE: The minor rev is always ignored.
*/
-static enum brcm_family_type brcmusb_get_family_type(
+static enum brcm_family_type get_family_type(
struct brcm_usb_init_params *params)
{
int last_type = -1;
@@ -775,9 +738,9 @@ static enum brcm_family_type brcmusb_get_family_type(
return last_type;
}
-void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+static void usb_init_ipp(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
u32 reg;
u32 orig_reg;
@@ -791,7 +754,7 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
}
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
orig_reg = reg;
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
/* Never use the strap, it's going away. */
@@ -799,8 +762,8 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
SETUP,
STRAP_CC_DRD_MODE_ENABLE_SEL));
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+ /* override ipp strap pin (if it exists) */
if (params->ipp != 2)
- /* override ipp strap pin (if it exits) */
reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
STRAP_IPP_SEL));
@@ -808,50 +771,38 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
if (params->ioc)
reg |= USB_CTRL_MASK(SETUP, IOC);
- if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+ if (params->ipp == 1)
reg |= USB_CTRL_MASK(SETUP, IPP);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
/*
* If we're changing IPP, make sure power is off long enough
* to turn off any connected devices.
*/
- if (reg != orig_reg)
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
msleep(50);
}
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+static void usb_wake_enable(struct brcm_usb_init_params *params,
+ bool enable)
{
- void __iomem *ctrl = params->ctrl_regs;
- u32 reg = 0;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- }
- return reg;
+ if (enable)
+ USB_CTRL_SET(ctrl, USB_PM, RMTWKUP_EN);
+ else
+ USB_CTRL_UNSET(ctrl, USB_PM, RMTWKUP_EN);
}
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode)
+static void usb_init_common(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- reg |= mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- }
-}
-
-void brcm_usb_init_common(struct brcm_usb_init_params *params)
-{
- u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ /* Clear any pending wake conditions */
+ usb_wake_enable(params, false);
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM_STATUS));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM_STATUS));
/* Take USB out of power down */
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
@@ -877,7 +828,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
/* Block auto PLL suspend by USB2 PHY (Sasi) */
USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7364A0)
/* Suppress overcurrent indication from USB30 ports for A0 */
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
@@ -893,16 +844,16 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
brcmusb_memc_fix(params);
if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
PORT_MODE);
reg |= params->mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
}
if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
switch (params->mode) {
@@ -924,10 +875,10 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
}
}
-void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+static void usb_init_eohci(struct brcm_usb_init_params *params)
{
u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
@@ -940,19 +891,30 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
/* Setup the endian bits */
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7271A0)
/* Enable LS keep alive fix for certain keyboards */
USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
+
+ if (params->family_id == 0x72550000) {
+ /*
+ * Make the burst size 512 bytes to fix a hardware bug
+ * on the 7255a0. See HW7255-24.
+ */
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, EBRIDGE));
+ reg &= ~USB_CTRL_MASK(EBRIDGE, EBR_SCB_SIZE);
+ reg |= 0x800;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, EBRIDGE));
+ }
}
-void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+static void usb_init_xhci(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
/* 1 millisecond - for USB clocks to settle down */
@@ -978,34 +940,80 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
brcmusb_usb3_otp_fix(params);
}
-void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+static void usb_uninit_common(struct brcm_usb_init_params *params)
{
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+ if (params->wake_enabled)
+ usb_wake_enable(params, true);
}
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+static void usb_uninit_eohci(struct brcm_usb_init_params *params)
{
- if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
- USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
}
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
{
brcmusb_xhci_soft_reset(params, 1);
- USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_PCTL,
+ PHY3_IDDQ_OVERRIDE);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ }
+ return reg;
}
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+}
+
+static const struct brcm_usb_init_ops bcm7445_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_eohci = usb_init_eohci,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_eohci = usb_uninit_eohci,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params)
{
int fam;
- fam = brcmusb_get_family_type(params);
+ pr_debug("%s\n", __func__);
+
+ fam = get_family_type(params);
params->selected_family = fam;
params->usb_reg_bits_map =
&usb_reg_bits_map_table[fam][0];
params->family_name = family_names[fam];
+ params->ops = &bcm7445_ops;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index f4f4f6d5d258..899b9eb43fad 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -6,16 +6,50 @@
#ifndef _USB_BRCM_COMMON_INIT_H
#define _USB_BRCM_COMMON_INIT_H
+#include <linux/regmap.h>
+
#define USB_CTLR_MODE_HOST 0
#define USB_CTLR_MODE_DEVICE 1
#define USB_CTLR_MODE_DRD 2
#define USB_CTLR_MODE_TYPEC_PD 3
+enum brcmusb_reg_sel {
+ BRCM_REGS_CTRL = 0,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ BRCM_REGS_BDC_EC,
+ BRCM_REGS_MAX
+};
+
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+ USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_SET(base, reg, field) \
+ brcm_usb_ctrl_set(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field) \
+ brcm_usb_ctrl_unset(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+
struct brcm_usb_init_params;
+struct brcm_usb_init_ops {
+ void (*init_ipp)(struct brcm_usb_init_params *params);
+ void (*init_common)(struct brcm_usb_init_params *params);
+ void (*init_eohci)(struct brcm_usb_init_params *params);
+ void (*init_xhci)(struct brcm_usb_init_params *params);
+ void (*uninit_common)(struct brcm_usb_init_params *params);
+ void (*uninit_eohci)(struct brcm_usb_init_params *params);
+ void (*uninit_xhci)(struct brcm_usb_init_params *params);
+ int (*get_dual_select)(struct brcm_usb_init_params *params);
+ void (*set_dual_select)(struct brcm_usb_init_params *params, int mode);
+};
+
struct brcm_usb_init_params {
- void __iomem *ctrl_regs;
- void __iomem *xhci_ec_regs;
+ void __iomem *regs[BRCM_REGS_MAX];
int ioc;
int ipp;
int mode;
@@ -24,19 +58,105 @@ struct brcm_usb_init_params {
int selected_family;
const char *family_name;
const u32 *usb_reg_bits_map;
+ const struct brcm_usb_init_ops *ops;
+ struct regmap *syscon_piarbctl;
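+ /*
+  * wake_enabled: set at suspend time if the device may wake the
+  * system.  suspend_with_clocks: keep the USB clocks running across
+  * suspend (set by the 7211 init).
+  */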
+ bool wake_enabled;
+ bool suspend_with_clocks;
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params);
+
+static inline u32 brcm_usb_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+{
+ /* See brcm_usb_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static inline void brcm_usb_ctrl_unset(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) & ~(mask), reg);
};
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode);
-
-void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
-void brcm_usb_init_common(struct brcm_usb_init_params *ini);
-void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+static inline void brcm_usb_ctrl_set(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) | (mask), reg);
+};
+
+static inline void brcm_usb_init_ipp(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_ipp)
+ ini->ops->init_ipp(ini);
+}
+
+static inline void brcm_usb_init_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_common)
+ ini->ops->init_common(ini);
+}
+
+static inline void brcm_usb_init_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_eohci)
+ ini->ops->init_eohci(ini);
+}
+
+static inline void brcm_usb_init_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_xhci)
+ ini->ops->init_xhci(ini);
+}
+
+static inline void brcm_usb_uninit_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_common)
+ ini->ops->uninit_common(ini);
+}
+
+static inline void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_eohci)
+ ini->ops->uninit_eohci(ini);
+}
+
+static inline void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_xhci)
+ ini->ops->uninit_xhci(ini);
+}
+
+static inline int brcm_usb_get_dual_select(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->get_dual_select)
+ return ini->ops->get_dual_select(ini);
+ return 0;
+}
+
+static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini,
+ int mode)
+{
+ if (ini->ops->set_dual_select)
+ ini->ops->set_dual_select(ini, mode);
+}
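+
+/*
+ * Illustrative sketch only -- bcm9999 and its functions are made-up
+ * names: a new SoC hooks into this layer by filling in an ops table
+ * and an init function that installs it:
+ *
+ *	static const struct brcm_usb_init_ops bcm9999_ops = {
+ *		.init_common	= usb_init_common_9999,
+ *		.uninit_common	= usb_uninit_common_9999,
+ *	};
+ *
+ *	void brcm_usb_dvr_init_9999(struct brcm_usb_init_params *params)
+ *	{
+ *		params->family_name = "9999";
+ *		params->ops = &bcm9999_ops;
+ *	}
+ */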
#endif /* _USB_BRCM_COMMON_INIT_H */
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index f5c1f2983a1d..491bbd46c5b3 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
+#include <linux/mfd/syscon.h>
#include "phy-brcm-usb-init.h"
@@ -32,6 +33,12 @@ struct value_to_name_map {
const char *name;
};
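+/*
+ * required_regs is a list terminated by -1.  An optional_reg of 0
+ * means "none"; this works because BRCM_REGS_CTRL (0) is always
+ * required, never optional.
+ */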
+struct match_chip_info {
+ void *init_func;
+ u8 required_regs[BRCM_REGS_MAX + 1];
+ u8 optional_reg;
+};
+
static struct value_to_name_map brcm_dr_mode_to_name[] = {
{ USB_CTLR_MODE_HOST, "host" },
{ USB_CTLR_MODE_DEVICE, "peripheral" },
@@ -57,11 +64,26 @@ struct brcm_usb_phy_data {
bool has_xhci;
struct clk *usb_20_clk;
struct clk *usb_30_clk;
+ struct clk *suspend_clk;
struct mutex mutex; /* serialize phy init */
int init_count;
+ int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
};
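+/* "crtl" (sic) matches the reg-names already used by existing DT nodes */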
+static s8 *node_reg_names[BRCM_REGS_MAX] = {
+ "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
+};
+
+static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+{
+ struct phy *gphy = dev_id;
+
+ pm_wakeup_event(&gphy->dev, 0);
+
+ return IRQ_HANDLED;
+}
+
static int brcm_usb_phy_init(struct phy *gphy)
{
struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
@@ -74,8 +96,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
*/
mutex_lock(&priv->mutex);
if (priv->init_count++ == 0) {
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->suspend_clk);
brcm_usb_init_common(&priv->ini);
}
mutex_unlock(&priv->mutex);
@@ -106,8 +129,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
mutex_lock(&priv->mutex);
if (--priv->init_count == 0) {
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ clk_disable_unprepare(priv->suspend_clk);
}
mutex_unlock(&priv->mutex);
phy->inited = false;
@@ -194,7 +218,7 @@ static ssize_t dual_select_store(struct device *dev,
res = name_to_value(&brcm_dual_mode_to_name[0],
ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
if (!res) {
- brcm_usb_init_set_dual_select(&priv->ini, value);
+ brcm_usb_set_dual_select(&priv->ini, value);
res = len;
}
mutex_unlock(&sysfs_lock);
@@ -209,7 +233,7 @@ static ssize_t dual_select_show(struct device *dev,
int value;
mutex_lock(&sysfs_lock);
- value = brcm_usb_init_get_dual_select(&priv->ini);
+ value = brcm_usb_get_dual_select(&priv->ini);
mutex_unlock(&sysfs_lock);
return sprintf(buf, "%s\n",
value_to_name(&brcm_dual_mode_to_name[0],
@@ -228,15 +252,106 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
-static int brcm_usb_phy_dvr_init(struct device *dev,
+static struct match_chip_info chip_info_7216 = {
+ .init_func = &brcm_usb_dvr_init_7216,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ -1,
+ },
+};
+
+static struct match_chip_info chip_info_7211b0 = {
+ .init_func = &brcm_usb_dvr_init_7211b0,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ -1,
+ },
+ .optional_reg = BRCM_REGS_BDC_EC,
+};
+
+static struct match_chip_info chip_info_7445 = {
+ .init_func = &brcm_usb_dvr_init_7445,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ -1,
+ },
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+ {
+ .compatible = "brcm,bcm7216-usb-phy",
+ .data = &chip_info_7216,
+ },
+ {
+ .compatible = "brcm,bcm7211-usb-phy",
+ .data = &chip_info_7211b0,
+ },
+ {
+ .compatible = "brcm,brcmstb-usb-phy",
+ .data = &chip_info_7445,
+ },
+ { /* sentinel */ }
+};
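+
+/*
+ * Illustrative DT fragment (addresses are made up; reg-names follow
+ * node_reg_names above):
+ *
+ *	usb-phy@f0470200 {
+ *		compatible = "brcm,brcmstb-usb-phy";
+ *		reg = <0xf0470200 0x200>, <0xf0471940 0x20>;
+ *		reg-names = "crtl", "xhci_ec";
+ *	};
+ */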
+
+static int brcm_usb_get_regs(struct platform_device *pdev,
+ enum brcmusb_reg_sel regs,
+ struct brcm_usb_init_params *ini,
+ bool optional)
+{
+ struct resource *res;
+
+ /* Older DT nodes have ctrl and optional xhci_ec by index only */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ node_reg_names[regs]);
+ if (res == NULL) {
+ if (regs == BRCM_REGS_CTRL) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ } else if (regs == BRCM_REGS_XHCI_EC) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /* XHCI_EC registers are optional */
+ if (res == NULL)
+ return 0;
+ }
+ if (res == NULL) {
+ if (optional) {
+ dev_dbg(&pdev->dev,
+ "Optional reg %s not found\n",
+ node_reg_names[regs]);
+ return 0;
+ }
+ dev_err(&pdev->dev, "can't get %s base addr\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ }
+ ini->regs[regs] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ini->regs[regs])) {
+ dev_err(&pdev->dev, "can't map %s register space\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
struct brcm_usb_phy_data *priv,
struct device_node *dn)
{
- struct phy *gphy;
+ struct device *dev = &pdev->dev;
+ struct phy *gphy = NULL;
int err;
priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
if (IS_ERR(priv->usb_20_clk)) {
+ if (PTR_ERR(priv->usb_20_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev, "Clock not found in Device Tree\n");
priv->usb_20_clk = NULL;
}
@@ -267,6 +382,8 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
if (IS_ERR(priv->usb_30_clk)) {
+ if (PTR_ERR(priv->usb_30_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev,
"USB3.0 clock not found in Device Tree\n");
priv->usb_30_clk = NULL;
@@ -275,18 +392,46 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
if (err)
return err;
}
+
+ priv->suspend_clk = clk_get(dev, "usb0_freerun");
+ if (IS_ERR(priv->suspend_clk)) {
+ if (PTR_ERR(priv->suspend_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "Suspend Clock not found in Device Tree\n");
+ priv->suspend_clk = NULL;
+ }
+
+ priv->wake_irq = platform_get_irq_byname(pdev, "wake");
+ if (priv->wake_irq < 0)
+ priv->wake_irq = platform_get_irq_byname(pdev, "wakeup");
+ if (priv->wake_irq >= 0) {
+ err = devm_request_irq(dev, priv->wake_irq,
+ brcm_usb_phy_wake_isr, 0,
+ dev_name(dev), gphy);
+ if (err < 0)
+ return err;
+ device_set_wakeup_capable(dev, 1);
+ } else {
+ dev_info(dev,
+ "Wake interrupt missing, system wake not supported\n");
+ }
+
return 0;
}
static int brcm_usb_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct brcm_usb_phy_data *priv;
struct phy_provider *phy_provider;
struct device_node *dn = pdev->dev.of_node;
int err;
const char *mode;
+ const struct of_device_id *match;
+ void (*dvr_init)(struct brcm_usb_init_params *params);
+ const struct match_chip_info *info;
+ struct regmap *rmap;
+ int x;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -295,30 +440,14 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
priv->ini.family_id = brcmstb_get_family_id();
priv->ini.product_id = brcmstb_get_product_id();
- brcm_usb_set_family_map(&priv->ini);
+
+ match = of_match_node(brcm_usb_dt_ids, dev->of_node);
+ info = match->data;
+ dvr_init = info->init_func;
+ (*dvr_init)(&priv->ini);
+
dev_dbg(dev, "Best mapping table is for %s\n",
priv->ini.family_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "can't get USB_CTRL base address\n");
- return -EINVAL;
- }
- priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.ctrl_regs)) {
- dev_err(dev, "can't map CTRL register space\n");
- return -EINVAL;
- }
-
- /* The XHCI EC registers are optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- priv->ini.xhci_ec_regs =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.xhci_ec_regs)) {
- dev_err(dev, "can't map XHCI EC register space\n");
- return -EINVAL;
- }
- }
of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
@@ -335,7 +464,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (of_property_read_bool(dn, "brcm,has-eohci"))
priv->has_eohci = true;
- err = brcm_usb_phy_dvr_init(dev, priv, dn);
+ for (x = 0; x < BRCM_REGS_MAX; x++) {
+ if (info->required_regs[x] >= BRCM_REGS_MAX)
+ break;
+
+ err = brcm_usb_get_regs(pdev, info->required_regs[x],
+ &priv->ini, false);
+ if (err)
+ return -EINVAL;
+ }
+ if (info->optional_reg) {
+ err = brcm_usb_get_regs(pdev, info->optional_reg,
+ &priv->ini, true);
+ if (err)
+ return -EINVAL;
+ }
+
+ err = brcm_usb_phy_dvr_init(pdev, priv, dn);
if (err)
return err;
@@ -354,14 +499,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
dev_warn(dev, "Error creating sysfs attributes\n");
+ /* Get piarbctl syscon if it exists */
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "syscon-piarbctl");
+ if (IS_ERR(rmap))
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "brcm,syscon-piarbctl");
+ if (!IS_ERR(rmap))
+ priv->ini.syscon_piarbctl = rmap;
+
/* start with everything off */
if (priv->has_xhci)
brcm_usb_uninit_xhci(&priv->ini);
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
@@ -381,8 +535,28 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ priv->ini.wake_enabled = device_may_wakeup(dev);
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+
+ /*
+ * Handle the clocks unless they are needed for wake.  This
+ * has to work for both older chips (XHCI on the 3.0 clock,
+ * EOHCI on the 2.0 clock) and newer chips (XHCI on both the
+ * 2.0 and 3.0 clocks).
+ */
+
+ if (!priv->ini.suspend_with_clocks) {
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+ !priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
+ }
+ if (priv->wake_irq >= 0)
+ enable_irq_wake(priv->wake_irq);
}
return 0;
}
@@ -391,8 +565,8 @@ static int brcm_usb_phy_resume(struct device *dev)
{
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
brcm_usb_init_ipp(&priv->ini);
/*
@@ -400,18 +574,22 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ if (priv->wake_irq >= 0)
+ disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
if (priv->phys[BRCM_USB_PHY_2_0].inited) {
brcm_usb_init_eohci(&priv->ini);
} else if (priv->has_eohci) {
brcm_usb_uninit_eohci(&priv->ini);
- clk_disable(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
}
if (priv->phys[BRCM_USB_PHY_3_0].inited) {
brcm_usb_init_xhci(&priv->ini);
} else if (priv->has_xhci) {
brcm_usb_uninit_xhci(&priv->ini);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (!priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
}
} else {
if (priv->has_xhci)
@@ -419,10 +597,10 @@ static int brcm_usb_phy_resume(struct device *dev)
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
}
-
+ priv->ini.wake_enabled = false;
return 0;
}
#endif /* CONFIG_PM_SLEEP */
@@ -431,11 +609,6 @@ static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
};
-static const struct of_device_id brcm_usb_dt_ids[] = {
- { .compatible = "brcm,brcmstb-usb-phy" },
- { /* sentinel */ }
-};
-
MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
static struct platform_driver brcm_usb_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index de10402f2931..a5c08e5bd2bf 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -22,48 +22,134 @@
#include <dt-bindings/phy/phy.h>
/* PHY register offsets */
-#define SIERRA_PHY_PLL_CFG (0xc00e << 2)
-#define SIERRA_DET_STANDEC_A (0x4000 << 2)
-#define SIERRA_DET_STANDEC_B (0x4001 << 2)
-#define SIERRA_DET_STANDEC_C (0x4002 << 2)
-#define SIERRA_DET_STANDEC_D (0x4003 << 2)
-#define SIERRA_DET_STANDEC_E (0x4004 << 2)
-#define SIERRA_PSM_LANECAL (0x4008 << 2)
-#define SIERRA_PSM_DIAG (0x4015 << 2)
-#define SIERRA_PSC_TX_A0 (0x4028 << 2)
-#define SIERRA_PSC_TX_A1 (0x4029 << 2)
-#define SIERRA_PSC_TX_A2 (0x402A << 2)
-#define SIERRA_PSC_TX_A3 (0x402B << 2)
-#define SIERRA_PSC_RX_A0 (0x4030 << 2)
-#define SIERRA_PSC_RX_A1 (0x4031 << 2)
-#define SIERRA_PSC_RX_A2 (0x4032 << 2)
-#define SIERRA_PSC_RX_A3 (0x4033 << 2)
-#define SIERRA_PLLCTRL_SUBRATE (0x403A << 2)
-#define SIERRA_PLLCTRL_GEN_D (0x403E << 2)
-#define SIERRA_DRVCTRL_ATTEN (0x406A << 2)
-#define SIERRA_CLKPATHCTRL_TMR (0x4081 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE1 (0x4087 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE0 (0x4088 << 2)
-#define SIERRA_CREQ_CCLKDET_MODE01 (0x408E << 2)
-#define SIERRA_RX_CTLE_MAINTENANCE (0x4091 << 2)
-#define SIERRA_CREQ_FSMCLK_SEL (0x4092 << 2)
-#define SIERRA_CTLELUT_CTRL (0x4098 << 2)
-#define SIERRA_DFE_ECMP_RATESEL (0x40C0 << 2)
-#define SIERRA_DFE_SMP_RATESEL (0x40C1 << 2)
-#define SIERRA_DEQ_VGATUNE_CTRL (0x40E1 << 2)
-#define SIERRA_TMRVAL_MODE3 (0x416E << 2)
-#define SIERRA_TMRVAL_MODE2 (0x416F << 2)
-#define SIERRA_TMRVAL_MODE1 (0x4170 << 2)
-#define SIERRA_TMRVAL_MODE0 (0x4171 << 2)
-#define SIERRA_PICNT_MODE1 (0x4174 << 2)
-#define SIERRA_CPI_OUTBUF_RATESEL (0x417C << 2)
-#define SIERRA_LFPSFILT_NS (0x418A << 2)
-#define SIERRA_LFPSFILT_RD (0x418B << 2)
-#define SIERRA_LFPSFILT_MP (0x418C << 2)
-#define SIERRA_SDFILT_H2L_A (0x4191 << 2)
-
-#define SIERRA_MACRO_ID 0x00007364
-#define SIERRA_MAX_LANES 4
+#define SIERRA_COMMON_CDB_OFFSET 0x0
+#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
+#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
+#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+
+#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
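+/*
+ * e.g. with block_offset = 0 and reg_offset = 2, lane 1's CDB starts
+ * at (0x4000 << 0) + ((1 << 9) << 2) = 0x4800
+ */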
+
+#define SIERRA_DET_STANDEC_A_PREG 0x000
+#define SIERRA_DET_STANDEC_B_PREG 0x001
+#define SIERRA_DET_STANDEC_C_PREG 0x002
+#define SIERRA_DET_STANDEC_D_PREG 0x003
+#define SIERRA_DET_STANDEC_E_PREG 0x004
+#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
+#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_TX_A0_PREG 0x028
+#define SIERRA_PSC_TX_A1_PREG 0x029
+#define SIERRA_PSC_TX_A2_PREG 0x02A
+#define SIERRA_PSC_TX_A3_PREG 0x02B
+#define SIERRA_PSC_RX_A0_PREG 0x030
+#define SIERRA_PSC_RX_A1_PREG 0x031
+#define SIERRA_PSC_RX_A2_PREG 0x032
+#define SIERRA_PSC_RX_A3_PREG 0x033
+#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
+#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
+#define SIERRA_PLLCTRL_STATUS_PREG 0x044
+#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
+#define SIERRA_DFE_BIASTRIM_PREG 0x04C
+#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
+#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
+#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
+#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
+#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
+#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
+#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
+#define SIERRA_CREQ_SPARE_PREG 0x096
+#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
+#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
+#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
+#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
+#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
+#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
+#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
+#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
+#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
+#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
+#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
+#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
+#define SIERRA_DEQ_GLUT0 0x0E8
+#define SIERRA_DEQ_GLUT1 0x0E9
+#define SIERRA_DEQ_GLUT2 0x0EA
+#define SIERRA_DEQ_GLUT3 0x0EB
+#define SIERRA_DEQ_GLUT4 0x0EC
+#define SIERRA_DEQ_GLUT5 0x0ED
+#define SIERRA_DEQ_GLUT6 0x0EE
+#define SIERRA_DEQ_GLUT7 0x0EF
+#define SIERRA_DEQ_GLUT8 0x0F0
+#define SIERRA_DEQ_GLUT9 0x0F1
+#define SIERRA_DEQ_GLUT10 0x0F2
+#define SIERRA_DEQ_GLUT11 0x0F3
+#define SIERRA_DEQ_GLUT12 0x0F4
+#define SIERRA_DEQ_GLUT13 0x0F5
+#define SIERRA_DEQ_GLUT14 0x0F6
+#define SIERRA_DEQ_GLUT15 0x0F7
+#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_DEQ_ALUT0 0x108
+#define SIERRA_DEQ_ALUT1 0x109
+#define SIERRA_DEQ_ALUT2 0x10A
+#define SIERRA_DEQ_ALUT3 0x10B
+#define SIERRA_DEQ_ALUT4 0x10C
+#define SIERRA_DEQ_ALUT5 0x10D
+#define SIERRA_DEQ_ALUT6 0x10E
+#define SIERRA_DEQ_ALUT7 0x10F
+#define SIERRA_DEQ_ALUT8 0x110
+#define SIERRA_DEQ_ALUT9 0x111
+#define SIERRA_DEQ_ALUT10 0x112
+#define SIERRA_DEQ_ALUT11 0x113
+#define SIERRA_DEQ_ALUT12 0x114
+#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
+#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_PICTRL_PREG 0x161
+#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
+#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
+#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
+#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
+#define SIERRA_LFPSFILT_NS_PREG 0x18A
+#define SIERRA_LFPSFILT_RD_PREG 0x18B
+#define SIERRA_LFPSFILT_MP_PREG 0x18C
+#define SIERRA_SIGDET_SUPPORT_PREG 0x190
+#define SIERRA_SDFILT_H2L_A_PREG 0x191
+#define SIERRA_SDFILT_L2H_PREG 0x193
+#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
+#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
+#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+
+#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PLL_CFG 0xe
+
+#define SIERRA_MACRO_ID 0x00007364
+#define SIERRA_MAX_LANES 16
+#define PLL_LOCK_TIME 100000
+
+static const struct reg_field macro_id_type =
+ REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
+static const struct reg_field phy_pll_cfg_1 =
+ REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pllctrl_lock =
+ REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
struct cdns_sierra_inst {
struct phy *phy;
@@ -80,53 +166,172 @@ struct cdns_reg_pairs {
struct cdns_sierra_data {
u32 id_value;
- u32 pcie_regs;
- u32 usb_regs;
- struct cdns_reg_pairs *pcie_vals;
- struct cdns_reg_pairs *usb_vals;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ u32 pcie_cmn_regs;
+ u32 pcie_ln_regs;
+ u32 usb_cmn_regs;
+ u32 usb_ln_regs;
+ struct cdns_reg_pairs *pcie_cmn_vals;
+ struct cdns_reg_pairs *pcie_ln_vals;
+ struct cdns_reg_pairs *usb_cmn_vals;
+ struct cdns_reg_pairs *usb_ln_vals;
};
-struct cdns_sierra_phy {
+struct cdns_regmap_cdb_context {
struct device *dev;
void __iomem *base;
+ u8 reg_offset_shift;
+};
+
+struct cdns_sierra_phy {
+ struct device *dev;
+ struct regmap *regmap;
struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
+ struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_common_cdb;
+ struct regmap_field *macro_id_type;
+ struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
struct clk *clk;
+ struct clk *cmn_refclk_dig_div;
+ struct clk *cmn_refclk1_dig_div;
int nsubnodes;
+ u32 num_lanes;
bool autoconf;
};
-static void cdns_sierra_phy_init(struct phy *gphy)
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
+
+#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static struct regmap_config cdns_sierra_lane_cdb_config[] = {
+ SIERRA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static struct regmap_config cdns_sierra_common_cdb_config = {
+ .name = "sierra_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_sierra_phy_config_ctrl_config = {
+ .name = "sierra_phy_config_ctrl",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ struct regmap *regmap;
int i, j;
- struct cdns_reg_pairs *vals;
- u32 num_regs;
+ struct cdns_reg_pairs *cmn_vals, *ln_vals;
+ u32 num_cmn_regs, num_ln_regs;
+
+ /* Initialise the PHY registers, unless auto configured */
+ if (phy->autoconf)
+ return 0;
+ clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
+ clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
if (ins->phy_type == PHY_TYPE_PCIE) {
- num_regs = phy->init_data->pcie_regs;
- vals = phy->init_data->pcie_vals;
+ num_cmn_regs = phy->init_data->pcie_cmn_regs;
+ num_ln_regs = phy->init_data->pcie_ln_regs;
+ cmn_vals = phy->init_data->pcie_cmn_vals;
+ ln_vals = phy->init_data->pcie_ln_vals;
} else if (ins->phy_type == PHY_TYPE_USB3) {
- num_regs = phy->init_data->usb_regs;
- vals = phy->init_data->usb_vals;
+ num_cmn_regs = phy->init_data->usb_cmn_regs;
+ num_ln_regs = phy->init_data->usb_ln_regs;
+ cmn_vals = phy->init_data->usb_cmn_vals;
+ ln_vals = phy->init_data->usb_ln_vals;
} else {
- return;
+ return -EINVAL;
}
- for (i = 0; i < ins->num_lanes; i++)
- for (j = 0; j < num_regs ; j++)
- writel(vals[j].val, phy->base +
- vals[j].off + (i + ins->mlane) * 0x800);
+
+ regmap = phy->regmap_common_cdb;
+ for (j = 0; j < num_cmn_regs ; j++)
+ regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+
+ for (i = 0; i < ins->num_lanes; i++) {
+ for (j = 0; j < num_ln_regs ; j++) {
+ regmap = phy->regmap_lane_cdb[i + ins->mlane];
+ regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ }
+ }
+
+ return 0;
}
static int cdns_sierra_phy_on(struct phy *gphy)
{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+ struct device *dev = sp->dev;
+ u32 val;
+ int ret;
/* Take the PHY lane group out of reset */
- return reset_control_deassert(ins->lnk_rst);
+ ret = reset_control_deassert(ins->lnk_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY lane out of reset\n");
+ return ret;
+ }
+
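+ /* Poll the lock bit every 1 ms, timing out after PLL_LOCK_TIME us */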
+ ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
+ val, val, 1000, PLL_LOCK_TIME);
+ if (ret < 0)
+ dev_err(dev, "PLL lock of lane failed\n");
+
+ return ret;
}
static int cdns_sierra_phy_off(struct phy *gphy)
@@ -136,9 +341,20 @@ static int cdns_sierra_phy_off(struct phy *gphy)
return reset_control_assert(ins->lnk_rst);
}
+static int cdns_sierra_phy_reset(struct phy *gphy)
+{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+
+ reset_control_assert(sp->phy_rst);
+ reset_control_deassert(sp->phy_rst);
+ return 0;
+}
+
static const struct phy_ops ops = {
+ .init = cdns_sierra_phy_init,
.power_on = cdns_sierra_phy_on,
.power_off = cdns_sierra_phy_off,
+ .reset = cdns_sierra_phy_reset,
.owner = THIS_MODULE,
};
@@ -159,41 +375,152 @@ static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
static const struct of_device_id cdns_sierra_id_table[];
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset, u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
+
+static int cdns_regfield_init(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+ int i;
+
+ regmap = sp->regmap_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
+ if (IS_ERR(field)) {
+ dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->macro_id_type = field;
+
+ regmap = sp->regmap_phy_config_ctrl;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->phy_pll_cfg_1 = field;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
+ if (IS_ERR(field)) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->pllctrl_lock[i] = field;
+ }
+
+ return 0;
+}
+
+static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
+ void __iomem *base, u8 block_offset_shift,
+ u8 reg_offset_shift)
+{
+ struct device *dev = sp->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_lane_cdb[i] = regmap;
+ }
+
+ regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
+ reg_offset_shift,
+ &cdns_sierra_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_common_cdb = regmap;
+
+ block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_config_ctrl_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY config and control regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_config_ctrl = regmap;
+
+ return 0;
+}
+
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct cdns_sierra_data *data;
+ unsigned int id_value;
struct resource *res;
int i, ret, node = 0;
+ void __iomem *base;
+ struct clk *clk;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
return -ENODEV;
+ /* Get init data for this PHY */
+ match = of_match_device(cdns_sierra_id_table, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_sierra_data *)match->data;
+
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
dev_set_drvdata(dev, sp);
sp->dev = dev;
+ sp->init_data = data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sp->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(sp->base)) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
dev_err(dev, "missing \"reg\"\n");
- return PTR_ERR(sp->base);
+ return PTR_ERR(base);
}
- /* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
- return -EINVAL;
- sp->init_data = (struct cdns_sierra_data *)match->data;
+ ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
+ data->reg_offset_shift);
+ if (ret)
+ return ret;
+
+ ret = cdns_regfield_init(sp);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, sp);
- sp->clk = devm_clk_get(dev, "phy_clk");
+ sp->clk = devm_clk_get_optional(dev, "phy_clk");
if (IS_ERR(sp->clk)) {
dev_err(dev, "failed to get clock phy_clk\n");
return PTR_ERR(sp->clk);
@@ -205,12 +532,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
return PTR_ERR(sp->phy_rst);
}
- sp->apb_rst = devm_reset_control_get(dev, "sierra_apb");
+ sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
if (IS_ERR(sp->apb_rst)) {
dev_err(dev, "failed to get apb reset\n");
return PTR_ERR(sp->apb_rst);
}
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk_dig_div = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk1_dig_div = clk;
+
ret = clk_prepare_enable(sp->clk);
if (ret)
return ret;
@@ -219,7 +562,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
reset_control_deassert(sp->apb_rst);
/* Check that PHY is present */
- if (sp->init_data->id_value != readl(sp->base)) {
+ regmap_field_read(sp->macro_id_type, &id_value);
+ if (sp->init_data->id_value != id_value) {
ret = -EINVAL;
goto clk_disable;
}
@@ -230,7 +574,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct phy *gphy;
sp->phys[node].lnk_rst =
- of_reset_control_get_exclusive_by_index(child, 0);
+ of_reset_control_array_get_exclusive(child);
if (IS_ERR(sp->phys[node].lnk_rst)) {
dev_err(dev, "failed to get reset %s\n",
@@ -248,6 +592,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
}
}
+ sp->num_lanes += sp->phys[node].num_lanes;
+
gphy = devm_phy_create(dev, child, &ops);
if (IS_ERR(gphy)) {
@@ -257,17 +603,18 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
- /* Initialise the PHY registers, unless auto configured */
- if (!sp->autoconf)
- cdns_sierra_phy_init(gphy);
-
node++;
}
sp->nsubnodes = node;
+ if (sp->num_lanes > SIERRA_MAX_LANES) {
+ dev_err(dev, "Invalid lane configuration\n");
+ goto put_child2;
+ }
+
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1)
- writel(2, sp->base + SIERRA_PHY_PLL_CFG);
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -288,7 +635,7 @@ clk_disable:
static int cdns_sierra_phy_remove(struct platform_device *pdev)
{
- struct cdns_sierra_phy *phy = dev_get_drvdata(pdev->dev.parent);
+ struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
int i;
reset_control_assert(phy->phy_rst);
@@ -306,68 +653,158 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
return 0;
}
-static struct cdns_reg_pairs cdns_usb_regs[] = {
- /*
- * Write USB configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0xFE0A, SIERRA_DET_STANDEC_A},
- {0x000F, SIERRA_DET_STANDEC_B},
- {0x55A5, SIERRA_DET_STANDEC_C},
- {0x69AD, SIERRA_DET_STANDEC_D},
- {0x0241, SIERRA_DET_STANDEC_E},
- {0x0110, SIERRA_PSM_LANECAL},
- {0xCF00, SIERRA_PSM_DIAG},
- {0x001F, SIERRA_PSC_TX_A0},
- {0x0007, SIERRA_PSC_TX_A1},
- {0x0003, SIERRA_PSC_TX_A2},
- {0x0003, SIERRA_PSC_TX_A3},
- {0x0FFF, SIERRA_PSC_RX_A0},
- {0x0003, SIERRA_PSC_RX_A1},
- {0x0003, SIERRA_PSC_RX_A2},
- {0x0001, SIERRA_PSC_RX_A3},
- {0x0001, SIERRA_PLLCTRL_SUBRATE},
- {0x0406, SIERRA_PLLCTRL_GEN_D},
- {0x0000, SIERRA_DRVCTRL_ATTEN},
- {0x823E, SIERRA_CLKPATHCTRL_TMR},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0},
- {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01},
- {0x023C, SIERRA_RX_CTLE_MAINTENANCE},
- {0x3232, SIERRA_CREQ_FSMCLK_SEL},
- {0x8452, SIERRA_CTLELUT_CTRL},
- {0x4121, SIERRA_DFE_ECMP_RATESEL},
- {0x4121, SIERRA_DFE_SMP_RATESEL},
- {0x9999, SIERRA_DEQ_VGATUNE_CTRL},
- {0x0330, SIERRA_TMRVAL_MODE0},
- {0x01FF, SIERRA_PICNT_MODE1},
- {0x0009, SIERRA_CPI_OUTBUF_RATESEL},
- {0x000F, SIERRA_LFPSFILT_NS},
- {0x0009, SIERRA_LFPSFILT_RD},
- {0x0001, SIERRA_LFPSFILT_MP},
- {0x8013, SIERRA_SDFILT_H2L_A},
- {0x0400, SIERRA_TMRVAL_MODE1},
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
-static struct cdns_reg_pairs cdns_pcie_regs[] = {
- /*
- * Write PCIe configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0x891f, SIERRA_DET_STANDEC_D},
- {0x0053, SIERRA_DET_STANDEC_E},
- {0x0400, SIERRA_TMRVAL_MODE2},
- {0x0200, SIERRA_TMRVAL_MODE3},
+/* refclk100MHz_32b_PCIe_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+};
+
+/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_20b_USB_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+ {0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+ {0x000F, SIERRA_DET_STANDEC_B_PREG},
+ {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x69ad, SIERRA_DET_STANDEC_D_PREG},
+ {0x0241, SIERRA_DET_STANDEC_E_PREG},
+ {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+ {0xCF00, SIERRA_PSM_DIAG_PREG},
+ {0x001F, SIERRA_PSC_TX_A0_PREG},
+ {0x0007, SIERRA_PSC_TX_A1_PREG},
+ {0x0003, SIERRA_PSC_TX_A2_PREG},
+ {0x0003, SIERRA_PSC_TX_A3_PREG},
+ {0x0FFF, SIERRA_PSC_RX_A0_PREG},
+ {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A2_PREG},
+ {0x0001, SIERRA_PSC_RX_A3_PREG},
+ {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+ {0x2512, SIERRA_DFE_BIASTRIM_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x8453, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0014, SIERRA_DEQ_GLUT0},
+ {0x0014, SIERRA_DEQ_GLUT1},
+ {0x0014, SIERRA_DEQ_GLUT2},
+ {0x0014, SIERRA_DEQ_GLUT3},
+ {0x0014, SIERRA_DEQ_GLUT4},
+ {0x0014, SIERRA_DEQ_GLUT5},
+ {0x0014, SIERRA_DEQ_GLUT6},
+ {0x0014, SIERRA_DEQ_GLUT7},
+ {0x0014, SIERRA_DEQ_GLUT8},
+ {0x0014, SIERRA_DEQ_GLUT9},
+ {0x0014, SIERRA_DEQ_GLUT10},
+ {0x0014, SIERRA_DEQ_GLUT11},
+ {0x0014, SIERRA_DEQ_GLUT12},
+ {0x0014, SIERRA_DEQ_GLUT13},
+ {0x0014, SIERRA_DEQ_GLUT14},
+ {0x0014, SIERRA_DEQ_GLUT15},
+ {0x0014, SIERRA_DEQ_GLUT16},
+ {0x0BAE, SIERRA_DEQ_ALUT0},
+ {0x0AEB, SIERRA_DEQ_ALUT1},
+ {0x0A28, SIERRA_DEQ_ALUT2},
+ {0x0965, SIERRA_DEQ_ALUT3},
+ {0x08A2, SIERRA_DEQ_ALUT4},
+ {0x07DF, SIERRA_DEQ_ALUT5},
+ {0x071C, SIERRA_DEQ_ALUT6},
+ {0x0659, SIERRA_DEQ_ALUT7},
+ {0x0596, SIERRA_DEQ_ALUT8},
+ {0x0514, SIERRA_DEQ_ALUT9},
+ {0x0492, SIERRA_DEQ_ALUT10},
+ {0x0410, SIERRA_DEQ_ALUT11},
+ {0x038E, SIERRA_DEQ_ALUT12},
+ {0x030C, SIERRA_DEQ_ALUT13},
+ {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+ {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
+ {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+ {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+ {0x000F, SIERRA_LFPSFILT_NS_PREG},
+ {0x0009, SIERRA_LFPSFILT_RD_PREG},
+ {0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x8013, SIERRA_SDFILT_H2L_A_PREG},
+ {0x8009, SIERRA_SDFILT_L2H_PREG},
+ {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static const struct cdns_sierra_data cdns_map_sierra = {
SIERRA_MACRO_ID,
- ARRAY_SIZE(cdns_pcie_regs),
- ARRAY_SIZE(cdns_usb_regs),
- cdns_pcie_regs,
- cdns_usb_regs
+ 0x2,
+ 0x2,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
+};
+
+static const struct cdns_sierra_data cdns_ti_map_sierra = {
+ SIERRA_MACRO_ID,
+ 0x0,
+ 0x1,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
};
static const struct of_device_id cdns_sierra_id_table[] = {
@@ -375,6 +812,10 @@ static const struct of_device_id cdns_sierra_id_table[] = {
.compatible = "cdns,sierra-phy-t0",
.data = &cdns_map_sierra,
},
+ {
+ .compatible = "ti,sierra-phy-t0",
+ .data = &cdns_ti_map_sierra,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index 534e393a09b3..1c73053bcc98 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -33,14 +33,14 @@ config PHY_HISTB_COMBPHY
If unsure, say N.
config PHY_HISI_INNO_USB2
- tristate "HiSilicon INNO USB2 PHY support"
- depends on (ARCH_HISI && ARM64) || COMPILE_TEST
- select GENERIC_PHY
- select MFD_SYSCON
- help
- Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
- USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
- USB host port to accept one USB device.
+ tristate "HiSilicon INNO USB2 PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
+ USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
+ USB host port to accept one USB device.
config PHY_HIX5HD2_SATA
tristate "HIX5HD2 SATA PHY Driver"
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
new file mode 100644
index 000000000000..4ea6a8897cd7
--- /dev/null
+++ b/drivers/phy/intel/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Intel Lightning Mountain (LGM) platform
+#
+config PHY_INTEL_EMMC
+ tristate "Intel EMMC PHY driver"
+ select GENERIC_PHY
+ help
+ Enable this to support the Intel eMMC PHY on Lightning Mountain (LGM) SoCs
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
new file mode 100644
index 000000000000..6b876a75599d
--- /dev/null
+++ b/drivers/phy/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-emmc.c
new file mode 100644
index 000000000000..703aeb122541
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-emmc.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel eMMC PHY driver
+ * Copyright (C) 2019 Intel, Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC phy register definitions */
+#define EMMC_PHYCTRL0_REG 0xa8
+#define DR_TY_MASK GENMASK(30, 28)
+#define DR_TY_SHIFT(x) (((x) << 28) & DR_TY_MASK)
+#define OTAPDLYENA BIT(14)
+#define OTAPDLYSEL_MASK GENMASK(13, 10)
+#define OTAPDLYSEL_SHIFT(x) (((x) << 10) & OTAPDLYSEL_MASK)
+
+#define EMMC_PHYCTRL1_REG 0xac
+#define PDB_MASK BIT(0)
+#define PDB_SHIFT(x) (((x) << 0) & PDB_MASK)
+#define ENDLL_MASK BIT(7)
+#define ENDLL_SHIFT(x) (((x) << 7) & ENDLL_MASK)
+
+#define EMMC_PHYCTRL2_REG 0xb0
+#define FRQSEL_25M 0
+#define FRQSEL_50M 1
+#define FRQSEL_100M 2
+#define FRQSEL_150M 3
+#define FRQSEL_MASK GENMASK(24, 22)
+#define FRQSEL_SHIFT(x) (((x) << 22) & FRQSEL_MASK)
+
+#define EMMC_PHYSTAT_REG 0xbc
+#define CALDONE_MASK BIT(9)
+#define DLLRDY_MASK BIT(8)
+#define IS_CALDONE(x) ((x) & CALDONE_MASK)
+#define IS_DLLRDY(x) ((x) & DLLRDY_MASK)
+
+struct intel_emmc_phy {
+ struct regmap *syscfg;
+ struct clk *emmcclk;
+};
+
+static int intel_emmc_phy_power(struct phy *phy, bool on_off)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ unsigned int caldone;
+ unsigned int dllrdy;
+ unsigned int freqsel;
+ unsigned long rate;
+ int ret, quot;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(0));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Power-off is just the PDB power-down done above */
+ if (!on_off)
+ return 0;
+
+ rate = clk_get_rate(priv->emmcclk);
+ quot = DIV_ROUND_CLOSEST(rate, 50000000);
+ if (quot > FRQSEL_150M)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
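+ /* e.g. a 100 MHz card clock gives quot = 2, i.e. FRQSEL_100M */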
+
+ /*
+ * According to the user manual, a calpad calibration
+ * cycle takes more than 2us; no minimum is recommended,
+ * so allow a little margin here.
+ */
+ udelay(5);
+
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * According to the user manual, the driver should wait 5us for
+ * calpad busy trimming. However, this value is documented as
+ * PVT (process, voltage and temperature) dependent, and failure
+ * cases have been observed, so be more tolerant of calpad busy
+ * trimming.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
+ caldone, IS_CALDONE(caldone),
+ 0, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Set the frequency of the DLL operation */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
+ FRQSEL_SHIFT(freqsel));
+ if (ret) {
+ dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+ return ret;
+ }
+
+ /* Turn on the DLL */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
+ ENDLL_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * After enabling the analog DLL circuits, the docs say we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+ * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take more than 10 ms. We'll be
+ * generous and give it 50ms.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg,
+ EMMC_PHYSTAT_REG,
+ dllrdy, IS_DLLRDY(dllrdy),
+ 0, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_init(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+ * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+ * The clock is optional, so upon any error just return it like
+ * any other error to the user.
+ */
+ priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(priv->emmcclk)) {
+ dev_err(&phy->dev, "ERROR: getting emmcclk\n");
+ return PTR_ERR(priv->emmcclk);
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_exit(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ clk_put(priv->emmcclk);
+
+ return 0;
+}
+
+static int intel_emmc_phy_power_on(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Drive impedance: 50 Ohm */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
+ DR_TY_SHIFT(6));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR set drive-impednce-50ohm: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay: disable */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
+ 0);
+ if (ret) {
+ dev_err(&phy->dev, "ERROR Set output tap delay : %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
+ OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: output tap dly select: %d\n", ret);
+ return ret;
+ }
+
+ /* Power up eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, true);
+}
+
+static int intel_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+ .init = intel_emmc_phy_init,
+ .exit = intel_emmc_phy_exit,
+ .power_on = intel_emmc_phy_power_on,
+ .power_off = intel_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int intel_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct intel_emmc_phy *priv;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Get eMMC phy (accessed via chiptop) regmap */
+ priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
+ if (IS_ERR(priv->syscfg)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(priv->syscfg);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id intel_emmc_phy_dt_ids[] = {
+ { .compatible = "intel,lgm-emmc-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
+
+static struct platform_driver intel_emmc_driver = {
+ .probe = intel_emmc_phy_probe,
+ .driver = {
+ .name = "intel-emmc-phy",
+ .of_match_table = intel_emmc_phy_dt_ids,
+ },
+};
+
+module_platform_driver(intel_emmc_driver);
+
+MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
+MODULE_DESCRIPTION("Intel eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 6e457967653e..2ff9a48d833e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -386,7 +386,7 @@ static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
default:
dev_err(dev, "invalid PHY mode %u\n", mode);
return ERR_PTR(-EINVAL);
- };
+ }
return priv->phy;
}
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 005e02dd4a91..8f6273c837ec 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -10,14 +10,16 @@ config ARMADA375_USBCLUSTER_PHY
config PHY_BERLIN_SATA
tristate "Marvell Berlin SATA PHY driver"
- depends on ARCH_BERLIN && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM
select GENERIC_PHY
help
Enable this to support the SATA PHY on Marvell Berlin SoCs.
config PHY_BERLIN_USB
tristate "Marvell Berlin USB PHY Driver"
- depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM && RESET_CONTROLLER
select GENERIC_PHY
help
Enable this to support the USB PHY on Marvell Berlin SoCs.
@@ -95,7 +97,7 @@ config PHY_PXA_28NM_USB2
config PHY_PXA_USB
tristate "Marvell PXA USB PHY Driver"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support Marvell PXA USB PHY driver for Marvell
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index e3b87c94aaf6..e41367f36ee1 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -221,7 +221,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
- ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
@@ -235,14 +235,14 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
@@ -342,7 +342,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
switch (lane->submode) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
break;
@@ -417,7 +417,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
/* refclk selection */
val = readl(priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
val &= ~MVEBU_COMPHY_MISC_CTRL0_REFCLK_SEL;
- if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+ if (lane->submode == PHY_INTERFACE_MODE_10GBASER)
val |= MVEBU_COMPHY_MISC_CTRL0_ICP_FORCE;
writel(val, priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
@@ -564,7 +564,7 @@ static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
-static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
+static int mvebu_comphy_set_mode_10gbaser(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -735,8 +735,8 @@ static int mvebu_comphy_power_on_legacy(struct phy *phy)
case PHY_INTERFACE_MODE_RXAUI:
ret = mvebu_comphy_set_mode_rxaui(phy);
break;
- case PHY_INTERFACE_MODE_10GKR:
- ret = mvebu_comphy_set_mode_10gkr(phy);
+ case PHY_INTERFACE_MODE_10GBASER:
+ ret = mvebu_comphy_set_mode_10gbaser(phy);
break;
default:
return -ENOTSUPP;
@@ -782,8 +782,8 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
- case PHY_INTERFACE_MODE_10GKR:
- dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+ case PHY_INTERFACE_MODE_10GBASER:
+ dev_dbg(priv->dev, "set lane %d to 10GBASE-R mode\n",
lane->id);
fw_speed = COMPHY_FW_SPEED_103125;
break;
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 376f5d189da0..dee757c957f2 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -3,12 +3,13 @@
# Phy drivers for Mediatek devices
#
config PHY_MTK_TPHY
- tristate "MediaTek T-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
- Say 'Y' here to add support for MediaTek T-PHY driver,
- it supports multiple usb2.0, usb3.0 ports, PCIe and
+ tristate "MediaTek T-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Say 'Y' here to add support for MediaTek T-PHY driver,
+ it supports multiple usb2.0, usb3.0 ports, PCIe and
SATA, and meanwhile supports two version T-PHY which have
different banks layout, the T-PHY with shared banks between
multi-ports is first version, otherwise is second version,
@@ -16,7 +17,8 @@ config PHY_MTK_TPHY
config PHY_MTK_UFS
tristate "MediaTek UFS M-PHY driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
select GENERIC_PHY
help
Support for UFS M-PHY on MediaTek chipsets.
@@ -25,10 +27,11 @@ config PHY_MTK_UFS
specified M-PHYs.
config PHY_MTK_XSPHY
- tristate "MediaTek XS-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
+ tristate "MediaTek XS-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
Enable this to support the SuperSpeedPlus XS-PHY transceiver for
USB3.1 GEN2 controllers on MediaTek chips. The driver supports
multiple USB2.0, USB3.1 GEN2 ports.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b04f4fe85ac2..2eb28cc2d2dc 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -29,7 +29,7 @@ static void devm_phy_release(struct device *dev, void *res)
{
struct phy *phy = *(struct phy **)res;
- phy_put(phy);
+ phy_put(dev, phy);
}
static void devm_phy_provider_release(struct device *dev, void *res)
@@ -566,12 +566,12 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id)
EXPORT_SYMBOL_GPL(of_phy_get);
/**
- * phy_put() - release the PHY
- * @phy: the phy returned by phy_get()
+ * of_phy_put() - release the PHY
+ * @phy: the phy returned by of_phy_get()
*
- * Releases a refcount the caller received from phy_get().
+ * Releases a refcount the caller received from of_phy_get().
*/
-void phy_put(struct phy *phy)
+void of_phy_put(struct phy *phy)
{
if (!phy || IS_ERR(phy))
return;
@@ -584,6 +584,20 @@ void phy_put(struct phy *phy)
module_put(phy->ops->owner);
put_device(&phy->dev);
}
+EXPORT_SYMBOL_GPL(of_phy_put);
+
+/**
+ * phy_put() - release the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by phy_get()
+ *
+ * Releases a refcount the caller received from phy_get().
+ */
+void phy_put(struct device *dev, struct phy *phy)
+{
+ device_link_remove(dev, &phy->dev);
+ of_phy_put(phy);
+}
EXPORT_SYMBOL_GPL(phy_put);
/**
@@ -651,6 +665,7 @@ struct phy *phy_get(struct device *dev, const char *string)
{
int index = 0;
struct phy *phy;
+ struct device_link *link;
if (string == NULL) {
dev_WARN(dev, "missing string\n");
@@ -672,6 +687,13 @@ struct phy *phy_get(struct device *dev, const char *string)
get_device(&phy->dev);
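+ /* Create a device link so the PHY is suspended only after its consumer */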
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(phy_get);
@@ -765,6 +787,7 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -776,6 +799,14 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
devres_add(dev, ptr);
} else {
devres_free(ptr);
+ return phy;
+ }
+
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
}
return phy;
@@ -798,6 +829,7 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -819,6 +851,13 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
*ptr = phy;
devres_add(dev, ptr);
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 42bc5150dd92..febe0aef68d4 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -80,7 +80,7 @@ static int read_poll_timeout(void __iomem *addr, u32 mask)
if (readl_relaxed(addr) & mask)
return 0;
- usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
+ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
} while (!time_after(jiffies, timeout));
return (readl_relaxed(addr) & mask) ? 0 : -ETIMEDOUT;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 66f91726b8b2..7db2a94f7a99 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -166,8 +166,9 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
};
static const unsigned int sm8150_ufsphy_regs_layout[] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x180,
+ [QPHY_START_CTRL] = QPHY_V4_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_SW_RESET,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -885,7 +886,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -1390,7 +1390,6 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.is_dual_lane_phy = true,
- .no_pcs_sw_reset = true,
};
static void qcom_qmp_phy_configure(void __iomem *base,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index ab6ff9b45a32..90f793c2293d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*/
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index dbd2de4d28b1..0824b9dd5683 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -39,6 +39,7 @@ config PHY_ROCKCHIP_INNO_DSIDPHY
tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
help
Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
Innosilicon IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index fc729ecd3fe9..a7c6c940a3a8 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
#include <linux/pm_runtime.h>
#include <linux/mfd/syscon.h>
@@ -167,31 +168,6 @@
#define DSI_PHY_STATUS 0xb0
#define PHY_LOCK BIT(0)
-struct mipi_dphy_timing {
- unsigned int clkmiss;
- unsigned int clkpost;
- unsigned int clkpre;
- unsigned int clkprepare;
- unsigned int clksettle;
- unsigned int clktermen;
- unsigned int clktrail;
- unsigned int clkzero;
- unsigned int dtermen;
- unsigned int eot;
- unsigned int hsexit;
- unsigned int hsprepare;
- unsigned int hszero;
- unsigned int hssettle;
- unsigned int hsskip;
- unsigned int hstrail;
- unsigned int init;
- unsigned int lpx;
- unsigned int taget;
- unsigned int tago;
- unsigned int tasure;
- unsigned int wakeup;
-};
-
struct inno_dsidphy {
struct device *dev;
struct clk *ref_clk;
@@ -201,7 +177,9 @@ struct inno_dsidphy {
void __iomem *host_base;
struct reset_control *rst;
enum phy_mode mode;
+ struct phy_configure_opts_mipi_dphy dphy_cfg;
+ struct clk *pll_clk;
struct {
struct clk_hw hw;
u8 prediv;
@@ -238,37 +216,79 @@ static void phy_update_bits(struct inno_dsidphy *inno,
writel(tmp, inno->phy_base + reg);
}
-static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
- unsigned long period)
+static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
+ unsigned long rate)
{
- /* Global Operation Timing Parameters */
- timing->clkmiss = 0;
- timing->clkpost = 70000 + 52 * period;
- timing->clkpre = 8 * period;
- timing->clkprepare = 65000;
- timing->clksettle = 95000;
- timing->clktermen = 0;
- timing->clktrail = 80000;
- timing->clkzero = 260000;
- timing->dtermen = 0;
- timing->eot = 0;
- timing->hsexit = 120000;
- timing->hsprepare = 65000 + 4 * period;
- timing->hszero = 145000 + 6 * period;
- timing->hssettle = 85000 + 6 * period;
- timing->hsskip = 40000;
- timing->hstrail = max(8 * period, 60000 + 4 * period);
- timing->init = 100000000;
- timing->lpx = 60000;
- timing->taget = 5 * timing->lpx;
- timing->tago = 4 * timing->lpx;
- timing->tasure = 2 * timing->lpx;
- timing->wakeup = 1000000000;
+ unsigned long prate = clk_get_rate(inno->ref_clk);
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+ * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
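+ *
+ * e.g. assuming a 24 MHz reference (fref = 12 MHz), a 1 GHz request
+ * picks prediv = 1 and fbdiv = 83 for a 996 MHz output.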
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+ /* 5MHz < Fref / prediv < 40MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of feedback divider are
+ * 12, 13, 14, 16, ~ 511
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ inno->pll.prediv = best_prediv;
+ inno->pll.fbdiv = best_fbdiv;
+ inno->pll.rate = best_freq;
+ }
+
+ return best_freq;
}
static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{
- struct mipi_dphy_timing gotp;
+ struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
const struct {
unsigned long rate;
u8 hs_prepare;
@@ -288,12 +308,14 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{ 800000000, 0x21, 0x1f, 0x09, 0x29},
{1000000000, 0x09, 0x20, 0x09, 0x27},
};
- u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 t_txbyteclkhs, t_txclkesc;
u32 txbyteclkhs, txclkesc, esc_clk_div;
u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
unsigned int i;
+ inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
+
/* Select MIPI mode */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
@@ -328,32 +350,27 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
txclkesc = txbyteclkhs / esc_clk_div;
t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
- ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
-
- memset(&gotp, 0, sizeof(gotp));
- mipi_dphy_timing_get_default(&gotp, ui);
-
/*
* The value of counter for HS Ths-exit
* Ths-exit = Tpin_txbyteclkhs * value
*/
- hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ hs_exit = DIV_ROUND_UP(cfg->hs_exit, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-post
* Tclk-post = Tpin_txbyteclkhs * value
*/
- clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ clk_post = DIV_ROUND_UP(cfg->clk_post, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
/*
* The value of counter for HS Tlpx Time
* Tlpx = Tpin_txbyteclkhs * (2 + value)
*/
- lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
if (lpx >= 2)
lpx -= 2;
@@ -362,19 +379,19 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* Tta-go for turnaround
* Tta-go = Ttxclkesc * value
*/
- ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ ta_go = DIV_ROUND_UP(cfg->ta_go, t_txclkesc);
/*
* The value of counter for HS Tta-sure
* Tta-sure for turnaround
* Tta-sure = Ttxclkesc * value
*/
- ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ ta_sure = DIV_ROUND_UP(cfg->ta_sure, t_txclkesc);
/*
* The value of counter for HS Tta-wait
* Tta-wait for turnaround
* Tta-wait = Ttxclkesc * value
*/
- ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+ ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
for (i = 0; i < ARRAY_SIZE(timings); i++)
if (inno->pll.rate <= timings[i].rate)
@@ -479,6 +496,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
struct inno_dsidphy *inno = phy_get_drvdata(phy);
clk_prepare_enable(inno->pclk_phy);
+ clk_prepare_enable(inno->ref_clk);
pm_runtime_get_sync(inno->dev);
/* Bandgap power on */
@@ -524,6 +542,7 @@ static int inno_dsidphy_power_off(struct phy *phy)
LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->ref_clk);
clk_disable_unprepare(inno->pclk_phy);
return 0;
@@ -546,168 +565,32 @@ static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
return 0;
}
-static const struct phy_ops inno_dsidphy_ops = {
- .set_mode = inno_dsidphy_set_mode,
- .power_on = inno_dsidphy_power_on,
- .power_off = inno_dsidphy_power_off,
- .owner = THIS_MODULE,
-};
-
-static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
- unsigned long prate,
- unsigned long rate,
- u8 *prediv, u16 *fbdiv)
-{
- unsigned long best_freq = 0;
- unsigned long fref, fout;
- u8 min_prediv, max_prediv;
- u8 _prediv, best_prediv = 1;
- u16 _fbdiv, best_fbdiv = 1;
- u32 min_delta = UINT_MAX;
-
- /*
- * The PLL output frequency can be calculated using a simple formula:
- * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
- * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
- */
- fref = prate / 2;
- if (rate > 1000000000UL)
- fout = 1000000000UL;
- else
- fout = rate;
-
- /* 5Mhz < Fref / prediv < 40MHz */
- min_prediv = DIV_ROUND_UP(fref, 40000000);
- max_prediv = fref / 5000000;
-
- for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * _prediv;
- do_div(tmp, fref);
- _fbdiv = tmp;
-
- /*
- * The possible settings of feedback divider are
- * 12, 13, 14, 16, ~ 511
- */
- if (_fbdiv == 15)
- continue;
-
- if (_fbdiv < 12 || _fbdiv > 511)
- continue;
-
- tmp = (u64)_fbdiv * fref;
- do_div(tmp, _prediv);
-
- delta = abs(fout - tmp);
- if (!delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- break;
- } else if (delta < min_delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- min_delta = delta;
- }
- }
-
- if (best_freq) {
- *prediv = best_prediv;
- *fbdiv = best_fbdiv;
- }
-
- return best_freq;
-}
-
-static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int inno_dsidphy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
-
- fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
- &prediv, &fbdiv);
-
- return fout;
-}
-
-static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+ int ret;
- fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
- &prediv, &fbdiv);
+ if (inno->mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
- dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
- parent_rate, fout, prediv, fbdiv);
+ ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+ if (ret)
+ return ret;
- inno->pll.prediv = prediv;
- inno->pll.fbdiv = fbdiv;
- inno->pll.rate = fout;
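+ /* Cache the validated timings; they are applied when the PHY powers on */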
+ memcpy(&inno->dphy_cfg, &opts->mipi_dphy, sizeof(inno->dphy_cfg));
return 0;
}
-static unsigned long
-inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
-
- /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
- return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
-}
-
-static const struct clk_ops inno_dsidphy_pll_clk_ops = {
- .round_rate = inno_dsidphy_pll_clk_round_rate,
- .set_rate = inno_dsidphy_pll_clk_set_rate,
- .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+static const struct phy_ops inno_dsidphy_ops = {
+ .configure = inno_dsidphy_configure,
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
};
-static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
-{
- struct device *dev = inno->dev;
- struct clk *clk;
- const char *parent_name;
- struct clk_init_data init;
- int ret;
-
- parent_name = __clk_get_name(inno->ref_clk);
-
- init.name = "mipi_dphy_pll";
- ret = of_property_read_string(dev->of_node, "clock-output-names",
- &init.name);
- if (ret < 0)
- dev_dbg(dev, "phy should set clock-output-names property\n");
-
- init.ops = &inno_dsidphy_pll_clk_ops;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.flags = 0;
-
- inno->pll.hw.init = &init;
- clk = devm_clk_register(dev, &inno->pll.hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to register PLL: %d\n", ret);
- return ret;
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
- &inno->pll.hw);
-}
-
static int inno_dsidphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -764,10 +647,6 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
return ret;
}
- ret = inno_dsidphy_pll_register(inno);
- if (ret)
- return ret;
-
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 290a6c70f570..9e483d1fdaf2 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -32,7 +32,7 @@ config PHY_EXYNOS_PCIE
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
depends on HAS_IOMEM
- depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
+ depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2 || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
default ARCH_EXYNOS
@@ -60,7 +60,7 @@ config PHY_EXYNOS5250_USB2
config PHY_S5PV210_USB2
bool "Support for S5PV210"
depends on PHY_SAMSUNG_USB2
- depends on ARCH_S5PV210
+ depends on ARCH_S5PV210 || COMPILE_TEST
help
Enable USB PHY support for S5PV210. This option requires that Samsung
USB 2.0 PHY driver is enabled and means that support for this
@@ -69,7 +69,7 @@ config PHY_S5PV210_USB2
config PHY_EXYNOS5_USBDRD
tristate "Exynos5 SoC series USB DRD PHY driver"
- depends on ARCH_EXYNOS && OF
+ depends on (ARCH_EXYNOS && OF) || COMPILE_TEST
depends on HAS_IOMEM
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 174888609779..6dbe9d0b9ff3 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -4,7 +4,7 @@
#
config PHY_DA8XX_USB
tristate "TI DA8xx USB PHY Driver"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
help
@@ -14,7 +14,7 @@ config PHY_DA8XX_USB
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
@@ -33,6 +33,22 @@ config PHY_AM654_SERDES
This option enables support for TI AM654 SerDes PHY used for
PCIe.
+config PHY_J721E_WIZ
+ tristate "TI J721E WIZ (SERDES Wrapper) support"
+ depends on OF && ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM && OF_ADDRESS
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ select MUX_MMIO
+ help
+ This option enables support for WIZ module present in TI's J721E
+ SoC. WIZ is a serdes wrapper used to configure some of the input
+ signals to the SERDES (Sierra/Torrent). This driver configures
+ three clock selects (pll0, pll1, dig) and resets for each of the
+ lanes.
+
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index bff901eb0ecc..dcba2571c9bd 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_AM654_SERDES) += phy-am654-serdes.o
obj-$(CONFIG_PHY_TI_GMII_SEL) += phy-gmii-sel.o
+obj-$(CONFIG_PHY_J721E_WIZ) += phy-j721e-wiz.o
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
new file mode 100644
index 000000000000..7b51045df783
--- /dev/null
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wrapper driver for SERDES used in J721E
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define WIZ_SERDES_CTRL 0x404
+#define WIZ_SERDES_TOP_CTRL 0x408
+#define WIZ_SERDES_RST 0x40c
+#define WIZ_SERDES_TYPEC 0x410
+#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
+
+#define WIZ_MAX_LANES 4
+#define WIZ_MUX_NUM_CLOCKS 3
+#define WIZ_DIV_NUM_CLOCKS_16G 2
+#define WIZ_DIV_NUM_CLOCKS_10G 1
+
+#define WIZ_SERDES_TYPEC_LN10_SWAP BIT(30)
+
+enum wiz_lane_standard_mode {
+ LANE_MODE_GEN1,
+ LANE_MODE_GEN2,
+ LANE_MODE_GEN3,
+ LANE_MODE_GEN4,
+};
+
+enum wiz_refclk_mux_sel {
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+ REFCLK_DIG,
+};
+
+enum wiz_refclk_div_sel {
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+};
+
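+/*
+ * Bit-field views into the WIZ registers above: each REG_FIELD names a
+ * register offset and an lsb..msb range. wiz_regfield_init() turns these
+ * into regmap_fields so the driver can access them without manual
+ * shifting and masking.
+ */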
+static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
+static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field pll1_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 29, 29);
+static const struct reg_field pll0_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 28, 28);
+static const struct reg_field refclk_dig_sel_16g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 25);
+static const struct reg_field refclk_dig_sel_10g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 24);
+static const struct reg_field pma_cmn_refclk_int_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
+static const struct reg_field pma_cmn_refclk_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
+static const struct reg_field pma_cmn_refclk_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
+static const struct reg_field pma_cmn_refclk1_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field p_enable[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 30, 31),
+ REG_FIELD(WIZ_LANECTL(1), 30, 31),
+ REG_FIELD(WIZ_LANECTL(2), 30, 31),
+ REG_FIELD(WIZ_LANECTL(3), 30, 31),
+};
+
+static const struct reg_field p_align[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 29, 29),
+ REG_FIELD(WIZ_LANECTL(1), 29, 29),
+ REG_FIELD(WIZ_LANECTL(2), 29, 29),
+ REG_FIELD(WIZ_LANECTL(3), 29, 29),
+};
+
+static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 28, 28),
+ REG_FIELD(WIZ_LANECTL(1), 28, 28),
+ REG_FIELD(WIZ_LANECTL(2), 28, 28),
+ REG_FIELD(WIZ_LANECTL(3), 28, 28),
+};
+
+static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 24, 25),
+ REG_FIELD(WIZ_LANECTL(1), 24, 25),
+ REG_FIELD(WIZ_LANECTL(2), 24, 25),
+ REG_FIELD(WIZ_LANECTL(3), 24, 25),
+};
+
+static const struct reg_field typec_ln10_swap =
+ REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+
+struct wiz_clk_mux {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ u32 *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_mux(_hw) container_of(_hw, struct wiz_clk_mux, hw)
+
+struct wiz_clk_divider {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
+
+struct wiz_clk_mux_sel {
+ struct regmap_field *field;
+ u32 table[4];
+ const char *node_name;
+};
+
+struct wiz_clk_div_sel {
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ const char *node_name;
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+ {
+ /*
+ * Mux value to be configured for each of the input clocks
+ * in the order populated in device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 3, 0, 2 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+ {
+ /*
+ * Mux value to be configured for each of the input clocks
+ * in the order populated in device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct clk_div_table clk_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 4, },
+ { .val = 3, .div = 8, },
+};
+
+static struct wiz_clk_div_sel clk_div_sel[] = {
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk-dig-div",
+ },
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk1-dig-div",
+ },
+};
+
+enum wiz_type {
+ J721E_WIZ_16G,
+ J721E_WIZ_10G,
+};
+
+#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
+#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000 /* ms */
+
+struct wiz {
+ struct regmap *regmap;
+ enum wiz_type type;
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ unsigned int clk_div_sel_num;
+ struct regmap_field *por_en;
+ struct regmap_field *phy_reset_n;
+ struct regmap_field *p_enable[WIZ_MAX_LANES];
+ struct regmap_field *p_align[WIZ_MAX_LANES];
+ struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
+ struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
+ struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk_mode;
+ struct regmap_field *pma_cmn_refclk_dig_div;
+ struct regmap_field *pma_cmn_refclk1_dig_div;
+ struct regmap_field *typec_ln10_swap;
+
+ struct device *dev;
+ u32 num_lanes;
+ struct platform_device *serdes_pdev;
+ struct reset_controller_dev wiz_phy_reset_dev;
+ struct gpio_desc *gpio_typec_dir;
+ u32 typec_dir_delay;
+};
+
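+/* Pulse the SERDES power-on reset: assert POR_EN, hold for 1 ms, release. */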
+static int wiz_reset(struct wiz *wiz)
+{
+ int ret;
+
+ ret = regmap_field_write(wiz->por_en, 0x1);
+ if (ret)
+ return ret;
+
+ mdelay(1);
+
+ ret = regmap_field_write(wiz->por_en, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
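+/* Program every lane to LANE_MODE_GEN4, the highest standard mode. */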
+static int wiz_mode_select(struct wiz *wiz)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int ret;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_standard_mode[i],
+ LANE_MODE_GEN4);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init_raw_interface(struct wiz *wiz, bool enable)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_align[i], enable);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(wiz->p_raw_auto_start[i], enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init(struct wiz *wiz)
+{
+ struct device *dev = wiz->dev;
+ int ret;
+
+ ret = wiz_reset(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ reset failed\n");
+ return ret;
+ }
+
+ ret = wiz_mode_select(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ mode select failed\n");
+ return ret;
+ }
+
+ ret = wiz_init_raw_interface(wiz, true);
+ if (ret) {
+ dev_err(dev, "WIZ interface initialization failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_regfield_init(struct wiz *wiz)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ struct regmap *regmap = wiz->regmap;
+ int num_lanes = wiz->num_lanes;
+ struct device *dev = wiz->dev;
+ int i;
+
+ wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
+ if (IS_ERR(wiz->por_en)) {
+ dev_err(dev, "POR_EN reg field init failed\n");
+ return PTR_ERR(wiz->por_en);
+ }
+
+ wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap,
+ phy_reset_n);
+ if (IS_ERR(wiz->phy_reset_n)) {
+ dev_err(dev, "PHY_RESET_N reg field init failed\n");
+ return PTR_ERR(wiz->phy_reset_n);
+ }
+
+ wiz->pma_cmn_refclk_int_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_int_mode);
+ }
+
+ wiz->pma_cmn_refclk_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_mode);
+ }
+
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
+ clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+
+ if (wiz->type == J721E_WIZ_16G) {
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
+ clk_div_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk1_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll0_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll1_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
+ if (wiz->type == J721E_WIZ_10G)
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_10g);
+ else
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_16g);
+
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
+ p_enable[i]);
+ if (IS_ERR(wiz->p_enable[i])) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(wiz->p_enable[i]);
+ }
+
+ wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap,
+ p_align[i]);
+ if (IS_ERR(wiz->p_align[i])) {
+ dev_err(dev, "P%d_ALIGN reg field init failed\n", i);
+ return PTR_ERR(wiz->p_align[i]);
+ }
+
+ wiz->p_raw_auto_start[i] =
+ devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]);
+ if (IS_ERR(wiz->p_raw_auto_start[i])) {
+ dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_raw_auto_start[i]);
+ }
+
+ wiz->p_standard_mode[i] =
+ devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]);
+ if (IS_ERR(wiz->p_standard_mode[i])) {
+ dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_standard_mode[i]);
+ }
+ }
+
+ wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
+ typec_ln10_swap);
+ if (IS_ERR(wiz->typec_ln10_swap)) {
+ dev_err(dev, "LN10_SWAP reg field init failed\n");
+ return PTR_ERR(wiz->typec_ln10_swap);
+ }
+
+ return 0;
+}
+
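+/*
+ * Mux clk_ops backed by a regmap_field; mux->table translates between the
+ * parent index seen by the clock framework and the register value (see
+ * clk_mux_sel_16g/clk_mux_sel_10g above).
+ */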
+static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+ return clk_mux_val_to_index(hw, mux->table, 0, val);
+}
+
+static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ int val;
+
+ val = mux->table[index];
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_mux_ops = {
+ .set_parent = wiz_clk_mux_set_parent,
+ .get_parent = wiz_clk_mux_get_parent,
+};
+
+static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field, u32 *table)
+{
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct wiz_clk_mux *mux;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ dev_err(dev, "SERDES clock must have parents\n");
+ return -EINVAL;
+ }
+
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
+ GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ init = &mux->clk_data;
+
+ init->ops = &wiz_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->field = field;
+ mux->table = table;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static unsigned long wiz_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+
+ return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
+}
+
+static long wiz_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+
+ return divider_round_rate(hw, rate, prate, div->table, 2, 0x0);
+}
+
+static int wiz_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+ int val;
+
+ val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
+ if (val < 0)
+ return val;
+
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_div_ops = {
+ .recalc_rate = wiz_clk_div_recalc_rate,
+ .round_rate = wiz_clk_div_round_rate,
+ .set_rate = wiz_clk_div_set_rate,
+};
+
+static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field,
+ struct clk_div_table *table)
+{
+ struct device *dev = wiz->dev;
+ struct wiz_clk_divider *div;
+ struct clk_init_data *init;
+ const char **parent_names;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ parent_names = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, 1);
+
+ init = &div->clk_data;
+
+ init->ops = &wiz_clk_div_ops;
+ init->flags = 0;
+ init->parent_names = parent_names;
+ init->num_parents = 1;
+ init->name = clk_name;
+
+ div->field = field;
+ div->table = table;
+ div->hw.init = init;
+
+ clk = devm_clk_register(dev, &div->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device_node *clk_node;
+ int i;
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
+ of_clk_del_provider(clk_node);
+ of_node_put(clk_node);
+ }
+
+ /* Also drop the divider clock providers registered in wiz_clock_init() */
+ for (i = 0; i < wiz->clk_div_sel_num; i++) {
+ clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
+ of_clk_del_provider(clk_node);
+ of_node_put(clk_node);
+ }
+}
+
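+/*
+ * Select the internal/external refclk modes from the input clock rates
+ * (100 MHz threshold) and register one clock provider per child node:
+ * three muxes plus one divider (10G) or two dividers (16G).
+ */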
+static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *clk_node;
+ const char *node_name;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+ int i;
+
+ clk = devm_clk_get(dev, "core_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+
+ clk = devm_clk_get(dev, "ext_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "ext_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ node_name = clk_mux_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
+ clk_mux_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ for (i = 0; i < wiz->clk_div_sel_num; i++) {
+ node_name = clk_div_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+ clk_div_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ return 0;
+err:
+ wiz_clock_cleanup(wiz, node);
+
+ return ret;
+}
+
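+/*
+ * Reset id 0 maps to the whole SERDES (PHY_RESET_N); ids 1..num_lanes
+ * toggle P_ENABLE for lane (id - 1).
+ */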
+static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, false);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], false);
+ return ret;
+}
+
+static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret;
+
+ /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
+ if (id == 0 && wiz->gpio_typec_dir) {
+ if (wiz->typec_dir_delay)
+ msleep_interruptible(wiz->typec_dir_delay);
+
+ if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ else
+ regmap_field_write(wiz->typec_ln10_swap, 0);
+ }
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, true);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], true);
+ return ret;
+}
+
+static const struct reset_control_ops wiz_phy_reset_ops = {
+ .assert = wiz_phy_reset_assert,
+ .deassert = wiz_phy_reset_deassert,
+};
+
+static struct regmap_config wiz_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct of_device_id wiz_id_table[] = {
+ {
+ .compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+ },
+ {
+ .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wiz_id_table);
+
+static int wiz_probe(struct platform_device *pdev)
+{
+ struct reset_controller_dev *phy_reset_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct device_node *child_node;
+ struct regmap *regmap;
+ struct resource res;
+ void __iomem *base;
+ struct wiz *wiz;
+ u32 num_lanes;
+ int ret;
+
+ wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
+ if (!wiz)
+ return -ENOMEM;
+
+ wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+
+ child_node = of_get_child_by_name(node, "serdes");
+ if (!child_node) {
+ dev_err(dev, "Failed to get SERDES child DT node\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(child_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get memory resource\n");
+ goto err_addr_to_resource;
+ }
+
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base) {
+ ret = -ENOMEM;
+ goto err_addr_to_resource;
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ ret = PTR_ERR(regmap);
+ goto err_addr_to_resource;
+ }
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret) {
+ dev_err(dev, "Failed to read num-lanes property\n");
+ goto err_addr_to_resource;
+ }
+
+ if (num_lanes > WIZ_MAX_LANES) {
+ dev_err(dev, "Cannot support %d lanes\n", num_lanes);
+ ret = -EINVAL;
+ goto err_addr_to_resource;
+ }
+
+ wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir",
+ GPIOD_IN);
+ if (IS_ERR(wiz->gpio_typec_dir)) {
+ ret = PTR_ERR(wiz->gpio_typec_dir);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request typec-dir gpio: %d\n",
+ ret);
+ goto err_addr_to_resource;
+ }
+
+ if (wiz->gpio_typec_dir) {
+ ret = of_property_read_u32(node, "typec-dir-debounce-ms",
+ &wiz->typec_dir_delay);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Invalid typec-dir-debounce property\n");
+ goto err_addr_to_resource;
+ }
+
+ /* use min. debounce from Type-C spec if not provided in DT */
+ if (ret == -EINVAL)
+ wiz->typec_dir_delay = WIZ_TYPEC_DIR_DEBOUNCE_MIN;
+
+ if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+ wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+ dev_err(dev, "Invalid typec-dir-debounce property\n");
+ goto err_addr_to_resource;
+ }
+ }
+
+ wiz->dev = dev;
+ wiz->regmap = regmap;
+ wiz->num_lanes = num_lanes;
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_mux_sel = clk_mux_sel_10g;
+ else
+ wiz->clk_mux_sel = clk_mux_sel_16g;
+
+ wiz->clk_div_sel = clk_div_sel;
+
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
+ else
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+
+ platform_set_drvdata(pdev, wiz);
+
+ ret = wiz_regfield_init(wiz);
+ if (ret) {
+ dev_err(dev, "Failed to initialize regfields\n");
+ goto err_addr_to_resource;
+ }
+
+ phy_reset_dev = &wiz->wiz_phy_reset_dev;
+ phy_reset_dev->dev = dev;
+ phy_reset_dev->ops = &wiz_phy_reset_ops;
+ phy_reset_dev->owner = THIS_MODULE;
+ phy_reset_dev->of_node = node;
+ /* Reset for each of the lane and one for the entire SERDES */
+ phy_reset_dev->nr_resets = num_lanes + 1;
+
+ ret = devm_reset_controller_register(dev, phy_reset_dev);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to register reset controller\n");
+ goto err_addr_to_resource;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = wiz_clock_init(wiz, node);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to initialize clocks\n");
+ goto err_get_sync;
+ }
+
+ serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+ if (!serdes_pdev) {
+ dev_WARN(dev, "Unable to create SERDES platform device\n");
+ ret = -ENOMEM;
+ goto err_pdev_create;
+ }
+ wiz->serdes_pdev = serdes_pdev;
+
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
+
+ of_node_put(child_node);
+ return 0;
+
+err_wiz_init:
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+
+err_pdev_create:
+ wiz_clock_cleanup(wiz, node);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+err_addr_to_resource:
+ of_node_put(child_node);
+
+ return ret;
+}
+
+static int wiz_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct wiz *wiz;
+
+ wiz = dev_get_drvdata(dev);
+ serdes_pdev = wiz->serdes_pdev;
+
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+ wiz_clock_cleanup(wiz, node);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static struct platform_driver wiz_driver = {
+ .probe = wiz_probe,
+ .remove = wiz_remove,
+ .driver = {
+ .name = "wiz",
+ .of_match_table = wiz_id_table,
+ },
+};
+module_platform_driver(wiz_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI J721E WIZ driver");
+MODULE_LICENSE("GPL v2");
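For context, this is how a consumer sitting behind WIZ could use the per-lane
resets registered above. A minimal sketch; the device pointer and the "lane0"
reset name are illustrative assumptions, not part of this patch:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	/* Hypothetical consumer: take SERDES lane 0 out of reset via WIZ. */
	static int serdes_enable_lane0(struct device *dev)
	{
		/* "lane0" is an assumed reset-names entry in the consumer DT node. */
		struct reset_control *rst = devm_reset_control_get(dev, "lane0");

		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Lands in wiz_phy_reset_deassert(), which sets P_ENABLE for the lane. */
		return reset_control_deassert(rst);
	}

Reset id 0 would instead toggle PHY_RESET_N for the entire SERDES.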
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index edd6859afba8..a87946589eb7 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -850,6 +850,12 @@ static int ti_pipe3_probe(struct platform_device *pdev)
static int ti_pipe3_remove(struct platform_device *pdev)
{
+ struct ti_pipe3 *phy = platform_get_drvdata(pdev);
+
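+ /*
+ * The SATA refclk is left enabled in probe to work around Errata i783;
+ * drop that extra reference here rather than in ti_pipe3_disable_clocks().
+ */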
+ if (phy->mode == PIPE3_MODE_SATA) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
pm_runtime_disable(&pdev->dev);
return 0;
@@ -900,18 +906,8 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- if (!IS_ERR(phy->refclk)) {
+ if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- /*
- * SATA refclk needs an additional disable as we left it
- * on in probe to avoid Errata i783
- */
- if (phy->sata_refclk_enabled) {
- clk_disable_unprepare(phy->refclk);
- phy->sata_refclk_enabled = false;
- }
- }
-
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
}
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 8b8121e35edb..771d6fd50b45 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1583,7 +1583,6 @@ static const struct owl_pinmux_func s700_functions[] = {
[S700_MUX_USB30] = FUNCTION(usb30),
[S700_MUX_CLKO_25M] = FUNCTION(clko_25m),
[S700_MUX_MIPI_CSI] = FUNCTION(mipi_csi),
- [S700_MUX_DSI] = FUNCTION(dsi),
[S700_MUX_NAND] = FUNCTION(nand),
[S700_MUX_SPDIF] = FUNCTION(spdif),
[S700_MUX_SIRQ0] = FUNCTION(sirq0),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 95ea593fa29d..bfed0e274643 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2439,88 +2439,88 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
static const struct aspeed_pin_config aspeed_g4_configs[] = {
/* GPIO banks ranges [A, B], [D, J], [M, R] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, A5 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, A5 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, A5, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, A5, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { A12, A13 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A12, A13 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { A12, A13 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, A12, A13, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A12, A13, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A12, A13, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { D9, D10 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D9, D10 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { D9, D10 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, D9, D10, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D9, D10, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D9, D10, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { E11, E10 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { E11, E10 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E11, E10, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E11, E10, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C9, C8 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C9, C8 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C9, C8, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C9, C8, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { P5, P5 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { P5, P5 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, P5, P5, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, P5, P5, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2531,14 +2531,14 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { A18, D16 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B17, A17 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C16, B16 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A16, E15 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D15, C15 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B15, A15 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E14, D14 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A18, D16, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B17, A17, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C16, B16, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A16, E15, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D15, C15, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B15, A15, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E14, D14, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C14, B14, SCUA8, 27),
};
static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
@@ -2594,6 +2594,14 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
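+/*
+ * Translate a (pinconf parameter, argument) pair into the value written
+ * under the given mask; an argument of -1 matches any argument.
+ */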
+static const struct aspeed_pin_config_map aspeed_g4_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
static const struct aspeed_pinmux_ops aspeed_g4_ops = {
.set = aspeed_g4_sig_expr_set,
};
@@ -2610,6 +2618,8 @@ static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
},
.configs = aspeed_g4_configs,
.nconfigs = ARRAY_SIZE(aspeed_g4_configs),
+ .confmaps = aspeed_g4_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g4_pin_config_map),
};
static const struct pinmux_ops aspeed_g4_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index d8a804b9f958..0cab4c2576e2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2476,124 +2476,124 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
static struct aspeed_pin_config aspeed_g5_configs[] = {
/* GPIOA, GPIOQ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A11, N20 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { A11, N20 }, SCU8C, 16 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A11, N20, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A11, N20, SCU8C, 16),
/* GPIOB, GPIOR */
- { PIN_CONFIG_BIAS_PULL_DOWN, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { AA19, E10 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { AA19, E10 }, SCU8C, 17 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, AA19, E10, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, AA19, E10, SCU8C, 17),
/* GPIOC, GPIOS*/
- { PIN_CONFIG_BIAS_PULL_DOWN, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, AA20 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, AA20 }, SCU8C, 18 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, AA20, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, AA20, SCU8C, 18),
/* GPIOD, GPIOY */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R22, P20 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { R22, P20 }, SCU8C, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R22, P20, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R22, P20, SCU8C, 19),
/* GPIOE, GPIOZ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y20, W21 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { Y20, W21 }, SCU8C, 20 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y20, W21, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y20, W21, SCU8C, 20),
/* GPIOF, GPIOAA */
- { PIN_CONFIG_BIAS_PULL_DOWN, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y21, P19 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { Y21, P19 }, SCU8C, 21 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y21, P19, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y21, P19, SCU8C, 21),
- /* GPIOG, GPIOAB */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N19, R20 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { N19, R20 }, SCU8C, 22 },
+ /* GPIOG, GPIOAB */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N19, R20, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N19, R20, SCU8C, 22),
/* GPIOH, GPIOAC */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G21, G22 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { G21, G22 }, SCU8C, 23 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G21, G22, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G21, G22, SCU8C, 23),
/* GPIOs [I, P] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V4, V6 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V4, V6 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V4, V6, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V4, V6, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B5, B5 }, SCU90, 8 },
- { PIN_CONFIG_DRIVE_STRENGTH, { E9, A5 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B5, D7 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { B5, D7 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B5, B5, SCU90, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, E9, A5, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B5, D7, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B5, D7, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B2, B2 }, SCU90, 10 },
- { PIN_CONFIG_DRIVE_STRENGTH, { B1, B3 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B2, D4 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { B2, D4 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B2, B2, SCU90, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B1, B3, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B2, D4, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B2, D4, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B4, C4 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { B4, C4 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B4, C4, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B4, C4, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C2, E6 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C2, E6 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C2, E6, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C2, E6, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H4, H4 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { H4, H4 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H4, H4, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H4, H4, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2604,14 +2604,14 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { F19, E21 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F20, D20 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D21, E20 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { G18, C21 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B20, C20 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F18, F17 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E18, D19 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F19, E21, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F20, D20, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D21, E20, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, G18, C21, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B20, C20, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F18, F17, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E18, D19, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A20, B19, SCUA8, 27),
};
static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
@@ -2780,6 +2780,14 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
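+/* Same (parameter, argument) -> (value, mask) translation as on the G4. */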
+static const struct aspeed_pin_config_map aspeed_g5_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
.eval = aspeed_g5_sig_expr_eval,
.set = aspeed_g5_sig_expr_set,
@@ -2797,6 +2805,8 @@ static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
},
.configs = aspeed_g5_configs,
.nconfigs = ARRAY_SIZE(aspeed_g5_configs),
+ .confmaps = aspeed_g5_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g5_pin_config_map),
};
static const struct pinmux_ops aspeed_g5_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index bb07024d22ed..fa32c3e9c9d1 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -26,7 +26,10 @@
#define SCU430 0x430 /* Multi-function Pin Control #8 */
#define SCU434 0x434 /* Multi-function Pin Control #9 */
#define SCU438 0x438 /* Multi-function Pin Control #10 */
+#define SCU440 0x440 /* USB Multi-function Pin Control #12 */
#define SCU450 0x450 /* Multi-function Pin Control #14 */
+#define SCU454 0x454 /* Multi-function Pin Control #15 */
+#define SCU458 0x458 /* Multi-function Pin Control #16 */
#define SCU4B0 0x4B0 /* Multi-function Pin Control #17 */
#define SCU4B4 0x4B4 /* Multi-function Pin Control #18 */
#define SCU4B8 0x4B8 /* Multi-function Pin Control #19 */
@@ -35,9 +38,17 @@
#define SCU4D8 0x4D8 /* Multi-function Pin Control #23 */
#define SCU500 0x500 /* Hardware Strap 1 */
#define SCU510 0x510 /* Hardware Strap 2 */
+#define SCU610 0x610 /* Disable GPIO Internal Pull-Down #0 */
+#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
+#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
+#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
+#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
+#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU694 0x694 /* Multi-function Pin Control #25 */
+#define SCUC20 0xC20 /* PCIE configuration Setting Control */
-#define ASPEED_G6_NR_PINS 248
+#define ASPEED_G6_NR_PINS 256
#define M24 0
SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
@@ -1534,6 +1545,78 @@ GROUP_DECL(I3C4, AE25, AF24);
FUNC_DECL_2(I3C4, HVI3C4, I3C4);
FUNC_GROUP_DECL(FSI2, AE25, AF24);
+#define AF23 248
+SIG_EXPR_LIST_DECL_SESG(AF23, I3C1SCL, I3C1, SIG_DESC_SET(SCU438, 16));
+PIN_DECL_(AF23, SIG_EXPR_LIST_PTR(AF23, I3C1SCL));
+
+#define AE24 249
+SIG_EXPR_LIST_DECL_SESG(AE24, I3C1SDA, I3C1, SIG_DESC_SET(SCU438, 17));
+PIN_DECL_(AE24, SIG_EXPR_LIST_PTR(AE24, I3C1SDA));
+
+FUNC_GROUP_DECL(I3C1, AF23, AE24);
+
+#define AF22 250
+SIG_EXPR_LIST_DECL_SESG(AF22, I3C2SCL, I3C2, SIG_DESC_SET(SCU438, 18));
+PIN_DECL_(AF22, SIG_EXPR_LIST_PTR(AF22, I3C2SCL));
+
+#define AE22 251
+SIG_EXPR_LIST_DECL_SESG(AE22, I3C2SDA, I3C2, SIG_DESC_SET(SCU438, 19));
+PIN_DECL_(AE22, SIG_EXPR_LIST_PTR(AE22, I3C2SDA));
+
+FUNC_GROUP_DECL(I3C2, AF22, AE22);
+
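+/*
+ * SCU440[25:24] selects what drives the USB2 port A pins and SCU440[29:28]
+ * the port B pins; each descriptor names the field value that enables the
+ * corresponding controller mode.
+ */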
+#define USB2ADP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 0, 0 }
+#define USB2AD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 1, 0 }
+#define USB2AH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 2, 0 }
+#define USB2AHP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 3, 0 }
+#define USB11BHID_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 0, 0 }
+#define USB2BD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 1, 0 }
+#define USB2BH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 2, 0 }
+
+#define A4 252
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
+ SIG_DESC_SET(SCUC20, 16));
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
+ SIG_EXPR_LIST_PTR(A4, USB2AHDP));
+
+#define B4 253
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
+ SIG_EXPR_LIST_PTR(B4, USB2AHDN));
+
+GROUP_DECL(USBA, A4, B4);
+
+FUNC_DECL_1(USB2ADP, USBA);
+FUNC_DECL_1(USB2AD, USBA);
+FUNC_DECL_1(USB2AH, USBA);
+FUNC_DECL_1(USB2AHP, USBA);
+
+#define A6 254
+SIG_EXPR_LIST_DECL_SEMG(A6, USB11BDP, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BDDP, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BHDP, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(A6, SIG_EXPR_LIST_PTR(A6, USB11BDP), SIG_EXPR_LIST_PTR(A6, USB2BDDP),
+ SIG_EXPR_LIST_PTR(A6, USB2BHDP));
+
+#define B6 255
+SIG_EXPR_LIST_DECL_SEMG(B6, USB11BDN, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BDDN, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BHDN, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(B6, SIG_EXPR_LIST_PTR(B6, USB11BDN), SIG_EXPR_LIST_PTR(B6, USB2BDDN),
+ SIG_EXPR_LIST_PTR(B6, USB2BHDN));
+
+GROUP_DECL(USBB, A6, B6);
+
+FUNC_DECL_1(USB11BHID, USBB);
+FUNC_DECL_1(USB2BD, USBB);
+FUNC_DECL_1(USB2BH, USBB);
+
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
@@ -1554,6 +1637,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(A24),
ASPEED_PINCTRL_PIN(A25),
ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A6),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
ASPEED_PINCTRL_PIN(AA16),
@@ -1625,6 +1710,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE22),
+ ASPEED_PINCTRL_PIN(AE24),
ASPEED_PINCTRL_PIN(AE25),
ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
@@ -1634,6 +1721,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF22),
+ ASPEED_PINCTRL_PIN(AF23),
ASPEED_PINCTRL_PIN(AF24),
ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
@@ -1654,6 +1743,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(B25),
ASPEED_PINCTRL_PIN(B26),
ASPEED_PINCTRL_PIN(B3),
+ ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B6),
ASPEED_PINCTRL_PIN(C1),
ASPEED_PINCTRL_PIN(C11),
ASPEED_PINCTRL_PIN(C12),
@@ -1847,6 +1938,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(I2C7),
ASPEED_PINCTRL_GROUP(I2C8),
ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(I3C1),
+ ASPEED_PINCTRL_GROUP(I3C2),
ASPEED_PINCTRL_GROUP(I3C3),
ASPEED_PINCTRL_GROUP(I3C4),
ASPEED_PINCTRL_GROUP(I3C5),
@@ -2012,6 +2105,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(UART7),
ASPEED_PINCTRL_GROUP(UART8),
ASPEED_PINCTRL_GROUP(UART9),
+ ASPEED_PINCTRL_GROUP(USBA),
+ ASPEED_PINCTRL_GROUP(USBB),
ASPEED_PINCTRL_GROUP(VB),
ASPEED_PINCTRL_GROUP(VGAHS),
ASPEED_PINCTRL_GROUP(VGAVS),
@@ -2079,6 +2174,8 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(I2C7),
ASPEED_PINCTRL_FUNC(I2C8),
ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(I3C1),
+ ASPEED_PINCTRL_FUNC(I3C2),
ASPEED_PINCTRL_FUNC(I3C3),
ASPEED_PINCTRL_FUNC(I3C4),
ASPEED_PINCTRL_FUNC(I3C5),
@@ -2221,6 +2318,13 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(UART7),
ASPEED_PINCTRL_FUNC(UART8),
ASPEED_PINCTRL_FUNC(UART9),
+ ASPEED_PINCTRL_FUNC(USB11BHID),
+ ASPEED_PINCTRL_FUNC(USB2AD),
+ ASPEED_PINCTRL_FUNC(USB2ADP),
+ ASPEED_PINCTRL_FUNC(USB2AH),
+ ASPEED_PINCTRL_FUNC(USB2AHP),
+ ASPEED_PINCTRL_FUNC(USB2BD),
+ ASPEED_PINCTRL_FUNC(USB2BH),
ASPEED_PINCTRL_FUNC(VB),
ASPEED_PINCTRL_FUNC(VGAHS),
ASPEED_PINCTRL_FUNC(VGAVS),
@@ -2230,6 +2334,260 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(WDTRST4),
};
+static struct aspeed_pin_config aspeed_g6_configs[] = {
+ /* GPIOB7 */
+ ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15),
+ /* GPIOB6 */
+ ASPEED_PULL_DOWN_PINCONF(H25, SCU610, 14),
+ /* GPIOB5 */
+ ASPEED_PULL_DOWN_PINCONF(G26, SCU610, 13),
+ /* GPIOB4 */
+ ASPEED_PULL_DOWN_PINCONF(J23, SCU610, 12),
+ /* GPIOB3 */
+ ASPEED_PULL_DOWN_PINCONF(J25, SCU610, 11),
+ /* GPIOB2 */
+ ASPEED_PULL_DOWN_PINCONF(H26, SCU610, 10),
+ /* GPIOB1 */
+ ASPEED_PULL_DOWN_PINCONF(K23, SCU610, 9),
+ /* GPIOB0 */
+ ASPEED_PULL_DOWN_PINCONF(J26, SCU610, 8),
+
+ /* GPIOH3 */
+ ASPEED_PULL_DOWN_PINCONF(A17, SCU614, 27),
+ /* GPIOH2 */
+ ASPEED_PULL_DOWN_PINCONF(C18, SCU614, 26),
+ /* GPIOH1 */
+ ASPEED_PULL_DOWN_PINCONF(B18, SCU614, 25),
+ /* GPIOH0 */
+ ASPEED_PULL_DOWN_PINCONF(A18, SCU614, 24),
+
+ /* GPIOL7 */
+ ASPEED_PULL_DOWN_PINCONF(C14, SCU618, 31),
+ /* GPIOL6 */
+ ASPEED_PULL_DOWN_PINCONF(B14, SCU618, 30),
+ /* GPIOL5 */
+ ASPEED_PULL_DOWN_PINCONF(F15, SCU618, 29),
+ /* GPIOL4 */
+ ASPEED_PULL_DOWN_PINCONF(C15, SCU618, 28),
+
+ /* GPIOJ7 */
+ ASPEED_PULL_UP_PINCONF(D19, SCU618, 15),
+ /* GPIOJ6 */
+ ASPEED_PULL_UP_PINCONF(C20, SCU618, 14),
+ /* GPIOJ5 */
+ ASPEED_PULL_UP_PINCONF(A19, SCU618, 13),
+ /* GPIOJ4 */
+ ASPEED_PULL_UP_PINCONF(C19, SCU618, 12),
+ /* GPIOJ3 */
+ ASPEED_PULL_UP_PINCONF(D20, SCU618, 11),
+ /* GPIOJ2 */
+ ASPEED_PULL_UP_PINCONF(E19, SCU618, 10),
+ /* GPIOJ1 */
+ ASPEED_PULL_UP_PINCONF(A20, SCU618, 9),
+ /* GPIOJ0 */
+ ASPEED_PULL_UP_PINCONF(B20, SCU618, 8),
+
+ /* GPIOI7 */
+ ASPEED_PULL_DOWN_PINCONF(A15, SCU618, 7),
+ /* GPIOI6 */
+ ASPEED_PULL_DOWN_PINCONF(B16, SCU618, 6),
+ /* GPIOI5 */
+ ASPEED_PULL_DOWN_PINCONF(E16, SCU618, 5),
+ /* GPIOI4 */
+ ASPEED_PULL_DOWN_PINCONF(C16, SCU618, 4),
+ /* GPIOI3 */
+ ASPEED_PULL_DOWN_PINCONF(D16, SCU618, 3),
+ /* GPIOI2 */
+ ASPEED_PULL_DOWN_PINCONF(E17, SCU618, 2),
+ /* GPIOI1 */
+ ASPEED_PULL_DOWN_PINCONF(A16, SCU618, 1),
+ /* GPIOI0 */
+ ASPEED_PULL_DOWN_PINCONF(D17, SCU618, 0),
+
+ /* GPIOP7 */
+ ASPEED_PULL_DOWN_PINCONF(Y23, SCU61C, 31),
+ /* GPIOP6 */
+ ASPEED_PULL_DOWN_PINCONF(AB24, SCU61C, 30),
+ /* GPIOP5 */
+ ASPEED_PULL_DOWN_PINCONF(AB23, SCU61C, 29),
+ /* GPIOP4 */
+ ASPEED_PULL_DOWN_PINCONF(W23, SCU61C, 28),
+ /* GPIOP3 */
+ ASPEED_PULL_DOWN_PINCONF(AA24, SCU61C, 27),
+ /* GPIOP2 */
+ ASPEED_PULL_DOWN_PINCONF(AA23, SCU61C, 26),
+ /* GPIOP1 */
+ ASPEED_PULL_DOWN_PINCONF(W24, SCU61C, 25),
+ /* GPIOP0 */
+ ASPEED_PULL_DOWN_PINCONF(AB22, SCU61C, 24),
+
+ /* GPIOO7 */
+ ASPEED_PULL_DOWN_PINCONF(AC23, SCU61C, 23),
+ /* GPIOO6 */
+ ASPEED_PULL_DOWN_PINCONF(AC24, SCU61C, 22),
+ /* GPIOO5 */
+ ASPEED_PULL_DOWN_PINCONF(AC22, SCU61C, 21),
+ /* GPIOO4 */
+ ASPEED_PULL_DOWN_PINCONF(AD25, SCU61C, 20),
+ /* GPIOO3 */
+ ASPEED_PULL_DOWN_PINCONF(AD24, SCU61C, 19),
+ /* GPIOO2 */
+ ASPEED_PULL_DOWN_PINCONF(AD23, SCU61C, 18),
+ /* GPIOO1 */
+ ASPEED_PULL_DOWN_PINCONF(AD22, SCU61C, 17),
+ /* GPIOO0 */
+ ASPEED_PULL_DOWN_PINCONF(AD26, SCU61C, 16),
+
+ /* GPION7 */
+ ASPEED_PULL_DOWN_PINCONF(M26, SCU61C, 15),
+ /* GPION6 */
+ ASPEED_PULL_DOWN_PINCONF(N26, SCU61C, 14),
+ /* GPION5 */
+ ASPEED_PULL_DOWN_PINCONF(M23, SCU61C, 13),
+ /* GPION4 */
+ ASPEED_PULL_DOWN_PINCONF(P26, SCU61C, 12),
+ /* GPION3 */
+ ASPEED_PULL_DOWN_PINCONF(N24, SCU61C, 11),
+ /* GPION2 */
+ ASPEED_PULL_DOWN_PINCONF(N25, SCU61C, 10),
+ /* GPION1 */
+ ASPEED_PULL_DOWN_PINCONF(N23, SCU61C, 9),
+ /* GPION0 */
+ ASPEED_PULL_DOWN_PINCONF(P25, SCU61C, 8),
+
+ /* GPIOM7 */
+ ASPEED_PULL_DOWN_PINCONF(D13, SCU61C, 7),
+ /* GPIOM6 */
+ ASPEED_PULL_DOWN_PINCONF(C13, SCU61C, 6),
+ /* GPIOM5 */
+ ASPEED_PULL_DOWN_PINCONF(C12, SCU61C, 5),
+ /* GPIOM4 */
+ ASPEED_PULL_DOWN_PINCONF(B12, SCU61C, 4),
+ /* GPIOM3 */
+ ASPEED_PULL_DOWN_PINCONF(E14, SCU61C, 3),
+ /* GPIOM2 */
+ ASPEED_PULL_DOWN_PINCONF(A12, SCU61C, 2),
+ /* GPIOM1 */
+ ASPEED_PULL_DOWN_PINCONF(B13, SCU61C, 1),
+ /* GPIOM0 */
+ ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+
+ /* GPIOS7 */
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ /* GPIOS6 */
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ /* GPIOS5 */
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ /* GPIOS4 */
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ /* GPIOS3 */
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ /* GPIOS2 */
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ /* GPIOS1 */
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ /* GPIOS0 */
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+
+ /* GPIOR7 */
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ /* GPIOR6 */
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ /* GPIOR5 */
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ /* GPIOR4 */
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ /* GPIOR3 */
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ /* GPIOR2 */
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ /* GPIOR1 */
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ /* GPIOR0 */
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+
+ /* GPIOX7 */
+ ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+ /* GPIOX6 */
+ ASPEED_PULL_DOWN_PINCONF(AF9, SCU634, 30),
+ /* GPIOX5 */
+ ASPEED_PULL_DOWN_PINCONF(AD9, SCU634, 29),
+ /* GPIOX4 */
+ ASPEED_PULL_DOWN_PINCONF(AB9, SCU634, 28),
+ /* GPIOX3 */
+ ASPEED_PULL_DOWN_PINCONF(AF8, SCU634, 27),
+ /* GPIOX2 */
+ ASPEED_PULL_DOWN_PINCONF(AC9, SCU634, 26),
+ /* GPIOX1 */
+ ASPEED_PULL_DOWN_PINCONF(AA9, SCU634, 25),
+ /* GPIOX0 */
+ ASPEED_PULL_DOWN_PINCONF(AE8, SCU634, 24),
+
+ /* GPIOV7 */
+ ASPEED_PULL_DOWN_PINCONF(AF15, SCU634, 15),
+ /* GPIOV6 */
+ ASPEED_PULL_DOWN_PINCONF(AD15, SCU634, 14),
+ /* GPIOV5 */
+ ASPEED_PULL_DOWN_PINCONF(AE14, SCU634, 13),
+ /* GPIOV4 */
+ ASPEED_PULL_DOWN_PINCONF(AE15, SCU634, 12),
+ /* GPIOV3 */
+ ASPEED_PULL_DOWN_PINCONF(AC15, SCU634, 11),
+ /* GPIOV2 */
+ ASPEED_PULL_DOWN_PINCONF(AD14, SCU634, 10),
+ /* GPIOV1 */
+ ASPEED_PULL_DOWN_PINCONF(AF14, SCU634, 9),
+ /* GPIOV0 */
+ ASPEED_PULL_DOWN_PINCONF(AB15, SCU634, 8),
+
+ /* GPIOZ7 */
+ ASPEED_PULL_DOWN_PINCONF(AF10, SCU638, 15),
+ /* GPIOZ6 */
+ ASPEED_PULL_DOWN_PINCONF(AD11, SCU638, 14),
+ /* GPIOZ5 */
+ ASPEED_PULL_DOWN_PINCONF(AA11, SCU638, 13),
+ /* GPIOZ4 */
+ ASPEED_PULL_DOWN_PINCONF(AC11, SCU638, 12),
+ /* GPIOZ3 */
+ ASPEED_PULL_DOWN_PINCONF(AB11, SCU638, 11),
+
+ /* GPIOZ1 */
+ ASPEED_PULL_DOWN_PINCONF(AD10, SCU638, 9),
+ /* GPIOZ0 */
+ ASPEED_PULL_DOWN_PINCONF(AC10, SCU638, 8),
+
+ /* GPIOY6 */
+ ASPEED_PULL_DOWN_PINCONF(AC12, SCU638, 6),
+ /* GPIOY5 */
+ ASPEED_PULL_DOWN_PINCONF(AF12, SCU638, 5),
+ /* GPIOY4 */
+ ASPEED_PULL_DOWN_PINCONF(AE12, SCU638, 4),
+ /* GPIOY3 */
+ ASPEED_PULL_DOWN_PINCONF(AA12, SCU638, 3),
+ /* GPIOY2 */
+ ASPEED_PULL_DOWN_PINCONF(AE11, SCU638, 2),
+ /* GPIOY1 */
+ ASPEED_PULL_DOWN_PINCONF(AD12, SCU638, 1),
+ /* GPIOY0 */
+ ASPEED_PULL_DOWN_PINCONF(AF11, SCU638, 0),
+
+ /* LAD3 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC7, AC7 }, SCU454, GENMASK(31, 30)},
+ /* LAD2 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC8, AC8 }, SCU454, GENMASK(29, 28)},
+ /* LAD1 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)},
+ /* LAD0 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)},
+
+ /* MAC3 */
+ { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { H24, E26 }, SCU458, GENMASK(1, 0)},
+ /* MAC4 */
+ { PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+};
+
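Each ASPEED_PULL_DOWN_PINCONF()/ASPEED_PULL_UP_PINCONF() line above expands to two aspeed_pin_config entries that share a single SCU bit, so both the bias request and a bias-disable request resolve to the same register field (the helper macros are added to pinctrl-aspeed.h later in this patch). Expanded by hand, the first GPIOB7 entry becomes (positional-initializer sketch, field order per the struct):

	/* ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15) expands to: */
	{ PIN_CONFIG_BIAS_PULL_DOWN, { J24, J24 }, SCU610, BIT_MASK(15) },
	{ PIN_CONFIG_BIAS_DISABLE,   { J24, J24 }, SCU610, BIT_MASK(15) },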
/**
* Configure a pin's signal by applying an expression's descriptor state for
* all descriptors in the expression.
@@ -2297,6 +2655,20 @@ static int aspeed_g6_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g6_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 4, 0, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 1, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 12, 2, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 3, GENMASK(1, 0)},
+ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
+ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
+};
+
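The confmaps table is pure data: for each generic pinconf parameter it pairs a requested argument (a drive strength in mA, a voltage in mV, or -1 for "any") with the value to program into the pin's register field. A standalone userspace sketch of the resulting register arithmetic, reusing the LAD3 field from the configs table above (GENMASK() is re-derived here for a 32-bit demo; in the kernel it comes from linux/bits.h):

	#include <stdio.h>

	/* 32-bit analogue of the kernel's GENMASK() */
	#define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

	int main(void)
	{
		unsigned int mask = GENMASK32(31, 30); /* LAD3 drive-strength field in SCU454 */
		unsigned int map_val = 2;              /* 12 mA -> 2, per the map above */
		unsigned int reg = 0x12345678;         /* arbitrary previous register contents */

		/* val = pmap->val << __ffs(pconf->mask); __builtin_ctz() plays __ffs() */
		unsigned int val = map_val << __builtin_ctz(mask);

		/* what regmap_update_bits(scu, SCU454, mask, val) does to the field */
		reg = (reg & ~mask) | (val & mask);

		printf("SCU454: 0x%08x (field %u)\n", reg, (reg & mask) >> __builtin_ctz(mask));
		return 0;
	}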
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
.set = aspeed_g6_sig_expr_set,
};
@@ -2311,6 +2683,10 @@ static struct aspeed_pinctrl_data aspeed_g6_pinctrl_data = {
.functions = aspeed_g6_functions,
.nfunctions = ARRAY_SIZE(aspeed_g6_functions),
},
+ .configs = aspeed_g6_configs,
+ .nconfigs = ARRAY_SIZE(aspeed_g6_configs),
+ .confmaps = aspeed_g6_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g6_pin_config_map),
};
static const struct pinmux_ops aspeed_g6_pinmux_ops = {
@@ -2331,12 +2707,21 @@ static const struct pinctrl_ops aspeed_g6_pinctrl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
+static const struct pinconf_ops aspeed_g6_conf_ops = {
+ .is_generic = true,
+ .pin_config_get = aspeed_pin_config_get,
+ .pin_config_set = aspeed_pin_config_set,
+ .pin_config_group_get = aspeed_pin_config_group_get,
+ .pin_config_group_set = aspeed_pin_config_group_set,
+};
+
static struct pinctrl_desc aspeed_g6_pinctrl_desc = {
.name = "aspeed-g6-pinctrl",
.pins = aspeed_g6_pins,
.npins = ARRAY_SIZE(aspeed_g6_pins),
.pctlops = &aspeed_g6_pinctrl_ops,
.pmxops = &aspeed_g6_pinmux_ops,
+ .confops = &aspeed_g6_conf_ops,
};
static int aspeed_g6_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 54933665b5f8..b625a657171e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -411,49 +411,21 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
return NULL;
}
-/*
- * Aspeed pin configuration description.
- *
- * @param: pinconf configuration parameter
- * @arg: The supported argument for @param, or -1 if any value is supported
- * @val: The register value to write to configure @arg for @param
- *
- * The map is to be used in conjunction with the configuration array supplied
- * by the driver implementation.
- */
-struct aspeed_pin_config_map {
- enum pin_config_param param;
- s32 arg;
- u32 val;
-};
-
enum aspeed_pin_config_map_type { MAP_TYPE_ARG, MAP_TYPE_VAL };
-/* Aspeed consistently both:
- *
- * 1. Defines "disable bits" for internal pull-downs
- * 2. Uses 8mA or 16mA drive strengths
- */
-static const struct aspeed_pin_config_map pin_config_map[] = {
- { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1 },
- { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0 },
- { PIN_CONFIG_BIAS_DISABLE, -1, 1 },
- { PIN_CONFIG_DRIVE_STRENGTH, 8, 0 },
- { PIN_CONFIG_DRIVE_STRENGTH, 16, 1 },
-};
-
static const struct aspeed_pin_config_map *find_pinconf_map(
+ const struct aspeed_pinctrl_data *pdata,
enum pin_config_param param,
enum aspeed_pin_config_map_type type,
s64 value)
{
int i;
- for (i = 0; i < ARRAY_SIZE(pin_config_map); i++) {
+ for (i = 0; i < pdata->nconfmaps; i++) {
const struct aspeed_pin_config_map *elem;
bool match;
- elem = &pin_config_map[i];
+ elem = &pdata->confmaps[i];
switch (type) {
case MAP_TYPE_ARG:
@@ -491,8 +463,8 @@ int aspeed_pin_config_get(struct pinctrl_dev *pctldev, unsigned int offset,
if (rc < 0)
return rc;
- pmap = find_pinconf_map(param, MAP_TYPE_VAL,
- (val & BIT(pconf->bit)) >> pconf->bit);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_VAL,
+ (val & pconf->mask) >> __ffs(pconf->mask));
if (!pmap)
return -EINVAL;
@@ -535,22 +507,22 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
if (!pconf)
return -ENOTSUPP;
- pmap = find_pinconf_map(param, MAP_TYPE_ARG, arg);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_ARG, arg);
if (WARN_ON(!pmap))
return -EINVAL;
- val = pmap->val << pconf->bit;
+ val = pmap->val << __ffs(pconf->mask);
rc = regmap_update_bits(pdata->scu, pconf->reg,
- BIT(pconf->bit), val);
+ pconf->mask, val);
if (rc < 0)
return rc;
- pr_debug("%s: Set SCU%02X[%d]=%d for param %d(=%d) on pin %d\n",
- __func__, pconf->reg, pconf->bit, pmap->val,
- param, arg, offset);
+ pr_debug("%s: Set SCU%02X[%lu]=%d for param %d(=%d) on pin %d\n",
+ __func__, pconf->reg, __ffs(pconf->mask),
+ pmap->val, param, arg, offset);
}
return 0;
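With the table now hanging off struct aspeed_pinctrl_data, find_pinconf_map() is used in both directions: MAP_TYPE_ARG translates a requested argument into a field value on the set path, and MAP_TYPE_VAL translates a field value read back from the SCU into an argument on the get path. A minimal userspace model of the two lookups (the table mirrors the drive-strength rows of aspeed_g6_pin_config_map):

	#include <stddef.h>
	#include <stdio.h>

	struct map { int arg; unsigned int val; };

	static const struct map drive_map[] = {
		{ 4, 0 }, { 8, 1 }, { 12, 2 }, { 16, 3 },
	};

	static const struct map *by_arg(int arg)          /* MAP_TYPE_ARG: set path */
	{
		for (size_t i = 0; i < sizeof(drive_map) / sizeof(drive_map[0]); i++)
			if (drive_map[i].arg == arg)
				return &drive_map[i];
		return NULL;
	}

	static const struct map *by_val(unsigned int val) /* MAP_TYPE_VAL: get path */
	{
		for (size_t i = 0; i < sizeof(drive_map) / sizeof(drive_map[0]); i++)
			if (drive_map[i].val == val)
				return &drive_map[i];
		return NULL;
	}

	int main(void)
	{
		printf("12 mA -> field value %u\n", by_arg(12)->val);
		printf("field value 1 -> %d mA\n", by_val(1)->arg);
		return 0;
	}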
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index a5d83986f32e..4dcde3bc29c8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -24,8 +24,7 @@ struct aspeed_pin_config {
enum pin_config_param param;
unsigned int pins[2];
unsigned int reg;
- u8 bit;
- u8 value;
+ u32 mask;
};
#define ASPEED_PINCTRL_PIN(name_) \
@@ -35,6 +34,38 @@ struct aspeed_pin_config {
.drv_data = (void *) &(PIN_SYM(name_)) \
}
+#define ASPEED_SB_PINCONF(param_, pin0_, pin1_, reg_, bit_) { \
+ .param = param_, \
+ .pins = {pin0_, pin1_}, \
+ .reg = reg_, \
+ .mask = BIT_MASK(bit_) \
+}
+
+#define ASPEED_PULL_DOWN_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
+
+#define ASPEED_PULL_UP_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_UP, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
+
+/*
+ * Aspeed pin configuration description.
+ *
+ * @param: pinconf configuration parameter
+ * @arg: The supported argument for @param, or -1 if any value is supported
+ * @val: The register value to write to configure @arg for @param
+ * @mask: The bitfield mask for @val
+ *
+ * The map is to be used in conjunction with the configuration array supplied
+ * by the driver implementation.
+ */
+struct aspeed_pin_config_map {
+ enum pin_config_param param;
+ s32 arg;
+ u32 val;
+ u32 mask;
+};
+
struct aspeed_pinctrl_data {
struct regmap *scu;
@@ -45,6 +76,9 @@ struct aspeed_pinctrl_data {
const unsigned int nconfigs;
struct aspeed_pinmux_data pinmux;
+
+ const struct aspeed_pin_config_map *confmaps;
+ const unsigned int nconfmaps;
};
/* Aspeed pinctrl helpers */
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 140c5ce9fbc1..f86739e800c3 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -737,6 +737,7 @@ struct aspeed_pin_desc {
#define FUNC_DECL_(func, ...) \
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
+#define FUNC_DECL_1(func, group) FUNC_DECL_(func, #group)
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 831a9318c384..25166217c3e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -286,6 +286,12 @@ static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
iproc_set_bit(chip, IPROC_GPIO_INT_DE_OFFSET, gpio, dual_edge);
iproc_set_bit(chip, IPROC_GPIO_INT_EDGE_OFFSET, gpio,
rising_or_high);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev,
@@ -843,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
"gpio-ranges");
/* optional GPIO interrupt support */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
@@ -868,7 +874,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
+ girq->handler = handle_bad_irq;
}
ret = gpiochip_add_data(gc, chip);
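Two behavioural fixes land here: the flow handler now tracks the requested trigger type (edge triggers need the ack-then-handle flow of handle_edge_irq(), level triggers the mask-handle-unmask flow of handle_level_irq()), and lines start out on handle_bad_irq() until a type is actually configured, rather than silently using handle_simple_irq(). A standalone userspace model of the selection logic (the IRQ_TYPE_* values match linux/irq.h):

	#include <stdio.h>

	#define IRQ_TYPE_EDGE_RISING  0x1
	#define IRQ_TYPE_EDGE_FALLING 0x2
	#define IRQ_TYPE_EDGE_BOTH    (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)
	#define IRQ_TYPE_LEVEL_HIGH   0x4

	static void handle_edge_irq(void)  { puts("edge flow: ack, then handle"); }
	static void handle_level_irq(void) { puts("level flow: mask, handle, unmask"); }
	static void handle_bad_irq(void)   { puts("no trigger type configured yet"); }

	int main(void)
	{
		void (*handler)(void) = handle_bad_irq; /* girq->handler default */
		unsigned int type = IRQ_TYPE_EDGE_BOTH;

		if (type & IRQ_TYPE_EDGE_BOTH)          /* as in iproc_gpio_irq_set_type() */
			handler = handle_edge_irq;
		else
			handler = handle_level_irq;

		handler();
		return 0;
	}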
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 32f268f173d1..57044ab376d3 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 3756fc9d5826..f1d60a708815 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -578,7 +578,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index e2f72dcce4c9..7b6409ef553c 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -560,7 +560,6 @@ static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_puts(s, " SCHMITT");
}
-
static const struct pinctrl_ops madera_pin_group_ops = {
.get_groups_count = madera_get_groups_count,
.get_group_name = madera_get_group_name,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2bbd8ee93507..446d84fe0e31 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p)
}
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup)
+/**
+ * pinctrl_register_mappings() - register a set of pin controller mappings
+ * @maps: the pin control mappings table to register. Note the pinctrl core
+ * keeps a reference to the passed in maps, so they should _not_ be
+ * marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ */
+int pinctrl_register_mappings(const struct pinctrl_map *maps,
+ unsigned num_maps)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
if (!maps_node)
return -ENOMEM;
+ maps_node->maps = maps;
maps_node->num_maps = num_maps;
- if (dup) {
- maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps,
- GFP_KERNEL);
- if (!maps_node->maps) {
- kfree(maps_node);
- return -ENOMEM;
- }
- } else {
- maps_node->maps = maps;
- }
mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
@@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
return 0;
}
+EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
/**
- * pinctrl_register_mappings() - register a set of pin controller mappings
- * @maps: the pincontrol mappings table to register. This should probably be
- * marked with __initdata so it can be discarded after boot. This
- * function will perform a shallow copy for the mapping entries.
- * @num_maps: the number of maps in the mapping table
+ * pinctrl_unregister_mappings() - unregister a set of pin controller mappings
+ * @maps: the pin control mappings table passed to pinctrl_register_mappings()
+ * when registering the mappings.
*/
-int pinctrl_register_mappings(const struct pinctrl_map *maps,
- unsigned num_maps)
-{
- return pinctrl_register_map(maps, num_maps, true);
-}
-EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
-
-void pinctrl_unregister_map(const struct pinctrl_map *map)
+void pinctrl_unregister_mappings(const struct pinctrl_map *map)
{
struct pinctrl_maps *maps_node;
@@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map)
}
mutex_unlock(&pinctrl_maps_mutex);
}
+EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
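The contract change is the important part: pinctrl_register_mappings() no longer duplicates the table with kmemdup(), so the caller's array must stay valid until pinctrl_unregister_mappings() — in particular it must not be __initdata. A sketch of a board-file registration under the new contract (device, controller, group and function names are hypothetical; assumes linux/pinctrl/machine.h):

	static const struct pinctrl_map board_map[] = {
		/* mux the hypothetical "uart_grp" group to the "uart" function */
		PIN_MAP_MUX_GROUP_DEFAULT("hypothetical-uart", "hypothetical-pinctrl",
					  "uart_grp", "uart"),
	};

	static int __init board_pinmux_init(void)
	{
		/* board_map has static storage duration, so handing over a
		 * reference (rather than a copy) is safe */
		return pinctrl_register_mappings(board_map, ARRAY_SIZE(board_map));
	}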
/**
* pinctrl_force_sleep() - turn a given controller device into sleep state
@@ -1535,15 +1526,8 @@ int pinctrl_init_done(struct device *dev)
return ret;
}
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
- struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+ struct pinctrl_state *state)
{
struct dev_pin_info *pins = dev->pins;
int ret;
@@ -1558,15 +1542,27 @@ static int pinctrl_pm_select_state(struct device *dev,
}
/**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
* @dev: device to select default state for
*/
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
{
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->default_state);
+ return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return pinctrl_select_default_state(dev);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
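pinctrl_select_default_state() is split out and exported so that code outside CONFIG_PM can return a device's pins to their default state, while the pinctrl_pm_select_*_state() helpers become thin wrappers around the shared pinctrl_select_bound_state() core. A sketch of the intended non-PM use (the caller is hypothetical):

	static int hypothetical_bus_rebind(struct device *dev)
	{
		/* restore the "default" pin state before handing the device
		 * to a new driver; no CONFIG_PM dependency any more */
		return pinctrl_select_default_state(dev);
	}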
@@ -1579,7 +1575,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+ return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
@@ -1592,7 +1588,7 @@ int pinctrl_pm_select_idle_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+ return pinctrl_select_bound_state(dev, dev->pins->idle_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
#endif
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7f34167a0405..840103c40c14 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
unsigned int pin);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup);
-void pinctrl_unregister_map(const struct pinctrl_map *map);
-
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 674920daac26..9357f7c46cf3 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
struct pinctrl_dt_map *dt_map, *n1;
list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
- pinctrl_unregister_map(dt_map->map);
+ pinctrl_unregister_mappings(dt_map->map);
list_del(&dt_map->node);
dt_free_map(dt_map->pctldev, dt_map->map,
dt_map->num_maps);
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false);
+ return pinctrl_register_mappings(map, num_maps);
err_free_map:
dt_free_map(pctldev, map, num_maps);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 3ea9ce3e0cd9..de775a85a51e 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -137,6 +137,13 @@ config PINCTRL_IMX8MN
help
Say Y here to enable the imx8mn pinctrl driver
+config PINCTRL_IMX8MP
+ bool "IMX8MP pinctrl driver"
+ depends on ARCH_MXC && ARM64
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mp pinctrl driver
+
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 78e9140c13e3..0ebd3af21e4d 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
obj-$(CONFIG_PINCTRL_IMX8MM) += pinctrl-imx8mm.o
obj-$(CONFIG_PINCTRL_IMX8MN) += pinctrl-imx8mn.o
+obj-$(CONFIG_PINCTRL_IMX8MP) += pinctrl-imx8mp.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 7e29e3fecdb2..c00d0022d311 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
if (!res)
return -ENOENT;
- ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ ipctl->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!ipctl->base)
return -ENOMEM;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
new file mode 100644
index 000000000000..e3f644c2ec13
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mp_pads {
+ MX8MP_IOMUXC_RESERVE0 = 0,
+ MX8MP_IOMUXC_RESERVE1 = 1,
+ MX8MP_IOMUXC_RESERVE2 = 2,
+ MX8MP_IOMUXC_RESERVE3 = 3,
+ MX8MP_IOMUXC_RESERVE4 = 4,
+ MX8MP_IOMUXC_GPIO1_IO00 = 5,
+ MX8MP_IOMUXC_GPIO1_IO01 = 6,
+ MX8MP_IOMUXC_GPIO1_IO02 = 7,
+ MX8MP_IOMUXC_GPIO1_IO03 = 8,
+ MX8MP_IOMUXC_GPIO1_IO04 = 9,
+ MX8MP_IOMUXC_GPIO1_IO05 = 10,
+ MX8MP_IOMUXC_GPIO1_IO06 = 11,
+ MX8MP_IOMUXC_GPIO1_IO07 = 12,
+ MX8MP_IOMUXC_GPIO1_IO08 = 13,
+ MX8MP_IOMUXC_GPIO1_IO09 = 14,
+ MX8MP_IOMUXC_GPIO1_IO10 = 15,
+ MX8MP_IOMUXC_GPIO1_IO11 = 16,
+ MX8MP_IOMUXC_GPIO1_IO12 = 17,
+ MX8MP_IOMUXC_GPIO1_IO13 = 18,
+ MX8MP_IOMUXC_GPIO1_IO14 = 19,
+ MX8MP_IOMUXC_GPIO1_IO15 = 20,
+ MX8MP_IOMUXC_ENET_MDC = 21,
+ MX8MP_IOMUXC_ENET_MDIO = 22,
+ MX8MP_IOMUXC_ENET_TD3 = 23,
+ MX8MP_IOMUXC_ENET_TD2 = 24,
+ MX8MP_IOMUXC_ENET_TD1 = 25,
+ MX8MP_IOMUXC_ENET_TD0 = 26,
+ MX8MP_IOMUXC_ENET_TX_CTL = 27,
+ MX8MP_IOMUXC_ENET_TXC = 28,
+ MX8MP_IOMUXC_ENET_RX_CTL = 29,
+ MX8MP_IOMUXC_ENET_RXC = 30,
+ MX8MP_IOMUXC_ENET_RD0 = 31,
+ MX8MP_IOMUXC_ENET_RD1 = 32,
+ MX8MP_IOMUXC_ENET_RD2 = 33,
+ MX8MP_IOMUXC_ENET_RD3 = 34,
+ MX8MP_IOMUXC_SD1_CLK = 35,
+ MX8MP_IOMUXC_SD1_CMD = 36,
+ MX8MP_IOMUXC_SD1_DATA0 = 37,
+ MX8MP_IOMUXC_SD1_DATA1 = 38,
+ MX8MP_IOMUXC_SD1_DATA2 = 39,
+ MX8MP_IOMUXC_SD1_DATA3 = 40,
+ MX8MP_IOMUXC_SD1_DATA4 = 41,
+ MX8MP_IOMUXC_SD1_DATA5 = 42,
+ MX8MP_IOMUXC_SD1_DATA6 = 43,
+ MX8MP_IOMUXC_SD1_DATA7 = 44,
+ MX8MP_IOMUXC_SD1_RESET_B = 45,
+ MX8MP_IOMUXC_SD1_STROBE = 46,
+ MX8MP_IOMUXC_SD2_CD_B = 47,
+ MX8MP_IOMUXC_SD2_CLK = 48,
+ MX8MP_IOMUXC_SD2_CMD = 49,
+ MX8MP_IOMUXC_SD2_DATA0 = 50,
+ MX8MP_IOMUXC_SD2_DATA1 = 51,
+ MX8MP_IOMUXC_SD2_DATA2 = 52,
+ MX8MP_IOMUXC_SD2_DATA3 = 53,
+ MX8MP_IOMUXC_SD2_RESET_B = 54,
+ MX8MP_IOMUXC_SD2_WP = 55,
+ MX8MP_IOMUXC_NAND_ALE = 56,
+ MX8MP_IOMUXC_NAND_CE0_B = 57,
+ MX8MP_IOMUXC_NAND_CE1_B = 58,
+ MX8MP_IOMUXC_NAND_CE2_B = 59,
+ MX8MP_IOMUXC_NAND_CE3_B = 60,
+ MX8MP_IOMUXC_NAND_CLE = 61,
+ MX8MP_IOMUXC_NAND_DATA00 = 62,
+ MX8MP_IOMUXC_NAND_DATA01 = 63,
+ MX8MP_IOMUXC_NAND_DATA02 = 64,
+ MX8MP_IOMUXC_NAND_DATA03 = 65,
+ MX8MP_IOMUXC_NAND_DATA04 = 66,
+ MX8MP_IOMUXC_NAND_DATA05 = 67,
+ MX8MP_IOMUXC_NAND_DATA06 = 68,
+ MX8MP_IOMUXC_NAND_DATA07 = 69,
+ MX8MP_IOMUXC_NAND_DQS = 70,
+ MX8MP_IOMUXC_NAND_RE_B = 71,
+ MX8MP_IOMUXC_NAND_READY_B = 72,
+ MX8MP_IOMUXC_NAND_WE_B = 73,
+ MX8MP_IOMUXC_NAND_WP_B = 74,
+ MX8MP_IOMUXC_SAI5_RXFS = 75,
+ MX8MP_IOMUXC_SAI5_RXC = 76,
+ MX8MP_IOMUXC_SAI5_RXD0 = 77,
+ MX8MP_IOMUXC_SAI5_RXD1 = 78,
+ MX8MP_IOMUXC_SAI5_RXD2 = 79,
+ MX8MP_IOMUXC_SAI5_RXD3 = 80,
+ MX8MP_IOMUXC_SAI5_MCLK = 81,
+ MX8MP_IOMUXC_SAI1_RXFS = 82,
+ MX8MP_IOMUXC_SAI1_RXC = 83,
+ MX8MP_IOMUXC_SAI1_RXD0 = 84,
+ MX8MP_IOMUXC_SAI1_RXD1 = 85,
+ MX8MP_IOMUXC_SAI1_RXD2 = 86,
+ MX8MP_IOMUXC_SAI1_RXD3 = 87,
+ MX8MP_IOMUXC_SAI1_RXD4 = 88,
+ MX8MP_IOMUXC_SAI1_RXD5 = 89,
+ MX8MP_IOMUXC_SAI1_RXD6 = 90,
+ MX8MP_IOMUXC_SAI1_RXD7 = 91,
+ MX8MP_IOMUXC_SAI1_TXFS = 92,
+ MX8MP_IOMUXC_SAI1_TXC = 93,
+ MX8MP_IOMUXC_SAI1_TXD0 = 94,
+ MX8MP_IOMUXC_SAI1_TXD1 = 95,
+ MX8MP_IOMUXC_SAI1_TXD2 = 96,
+ MX8MP_IOMUXC_SAI1_TXD3 = 97,
+ MX8MP_IOMUXC_SAI1_TXD4 = 98,
+ MX8MP_IOMUXC_SAI1_TXD5 = 99,
+ MX8MP_IOMUXC_SAI1_TXD6 = 100,
+ MX8MP_IOMUXC_SAI1_TXD7 = 101,
+ MX8MP_IOMUXC_SAI1_MCLK = 102,
+ MX8MP_IOMUXC_SAI2_RXFS = 103,
+ MX8MP_IOMUXC_SAI2_RXC = 104,
+ MX8MP_IOMUXC_SAI2_RXD0 = 105,
+ MX8MP_IOMUXC_SAI2_TXFS = 106,
+ MX8MP_IOMUXC_SAI2_TXC = 107,
+ MX8MP_IOMUXC_SAI2_TXD0 = 108,
+ MX8MP_IOMUXC_SAI2_MCLK = 109,
+ MX8MP_IOMUXC_SAI3_RXFS = 110,
+ MX8MP_IOMUXC_SAI3_RXC = 111,
+ MX8MP_IOMUXC_SAI3_RXD = 112,
+ MX8MP_IOMUXC_SAI3_TXFS = 113,
+ MX8MP_IOMUXC_SAI3_TXC = 114,
+ MX8MP_IOMUXC_SAI3_TXD = 115,
+ MX8MP_IOMUXC_SAI3_MCLK = 116,
+ MX8MP_IOMUXC_SPDIF_TX = 117,
+ MX8MP_IOMUXC_SPDIF_RX = 118,
+ MX8MP_IOMUXC_SPDIF_EXT_CLK = 119,
+ MX8MP_IOMUXC_ECSPI1_SCLK = 120,
+ MX8MP_IOMUXC_ECSPI1_MOSI = 121,
+ MX8MP_IOMUXC_ECSPI1_MISO = 122,
+ MX8MP_IOMUXC_ECSPI1_SS0 = 123,
+ MX8MP_IOMUXC_ECSPI2_SCLK = 124,
+ MX8MP_IOMUXC_ECSPI2_MOSI = 125,
+ MX8MP_IOMUXC_ECSPI2_MISO = 126,
+ MX8MP_IOMUXC_ECSPI2_SS0 = 127,
+ MX8MP_IOMUXC_I2C1_SCL = 128,
+ MX8MP_IOMUXC_I2C1_SDA = 129,
+ MX8MP_IOMUXC_I2C2_SCL = 130,
+ MX8MP_IOMUXC_I2C2_SDA = 131,
+ MX8MP_IOMUXC_I2C3_SCL = 132,
+ MX8MP_IOMUXC_I2C3_SDA = 133,
+ MX8MP_IOMUXC_I2C4_SCL = 134,
+ MX8MP_IOMUXC_I2C4_SDA = 135,
+ MX8MP_IOMUXC_UART1_RXD = 136,
+ MX8MP_IOMUXC_UART1_TXD = 137,
+ MX8MP_IOMUXC_UART2_RXD = 138,
+ MX8MP_IOMUXC_UART2_TXD = 139,
+ MX8MP_IOMUXC_UART3_RXD = 140,
+ MX8MP_IOMUXC_UART3_TXD = 141,
+ MX8MP_IOMUXC_UART4_RXD = 142,
+ MX8MP_IOMUXC_UART4_TXD = 143,
+ MX8MP_IOMUXC_HDMI_DDC_SCL = 144,
+ MX8MP_IOMUXC_HDMI_DDC_SDA = 145,
+ MX8MP_IOMUXC_HDMI_CEC = 146,
+ MX8MP_IOMUXC_HDMI_HPD = 147,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mp_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE2_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE3_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_CEC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_HPD),
+};
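+
+The pad table is generated mechanically from the enum: IMX_PINCTRL_PIN() (defined in pinctrl-imx.h) stringifies its argument, so the pad number and the name shown in debugfs cannot drift apart. For example:
+
+	/* IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00) expands to
+	 * PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00, "MX8MP_IOMUXC_GPIO1_IO00"),
+	 * i.e. { .number = 5, .name = "MX8MP_IOMUXC_GPIO1_IO00" }
+	 */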
+
+static const struct imx_pinctrl_soc_info imx8mp_pinctrl_info = {
+ .pins = imx8mp_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mp_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mp-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mp_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mp_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mp_pinctrl_info);
+}
+
+static struct platform_driver imx8mp_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mp-pinctrl",
+ .of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+ },
+ .probe = imx8mp_pinctrl_probe,
+};
+
+static int __init imx8mp_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mp_pinctrl_driver);
+}
+arch_initcall(imx8mp_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 6091947a8f51..ee440ec4c94c 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -31,6 +31,19 @@ config PINCTRL_CHERRYVIEW
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
+config PINCTRL_LYNXPOINT
+ tristate "Intel Lynxpoint pinctrl and GPIO driver"
+ depends on ACPI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ help
+ Lynxpoint is the PCH of Intel Haswell. This pinctrl driver
+ provides an interface that allows configuring of PCH pins and
+ using them as GPIOs.
+
config PINCTRL_MERRIFIELD
tristate "Intel Merrifield pinctrl driver"
depends on X86_INTEL_MID
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 7e620b471ef6..f60f99cfa7aa 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55141d5de29e..b409642f168d 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -93,7 +93,7 @@
#define BYT_DEFAULT_GPIO_MUX 0
#define BYT_ALTER_GPIO_MUX 1
-struct byt_gpio_pin_context {
+struct intel_pad_context {
u32 conf0;
u32 val;
};
@@ -105,16 +105,6 @@ struct byt_gpio_pin_context {
.pad_map = (map),\
}
-struct byt_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- struct pinctrl_dev *pctl_dev;
- struct pinctrl_desc pctl_desc;
- const struct intel_pinctrl_soc_data *soc_data;
- struct intel_community *communities_copy;
- struct byt_gpio_pin_context *saved_context;
-};
-
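The driver-private struct byt_gpio is dropped in favour of the struct intel_pinctrl shared with the other Intel pin controller drivers; the rest of this patch is largely a mechanical rename of the fields (soc_data becomes soc, communities_copy becomes communities, &vg->pdev->dev becomes vg->dev, saved_context becomes context.pads). An abridged sketch of the shared structure as of this series — the field list here is from memory, and pinctrl-intel.h is authoritative:

	/* Abridged; see drivers/pinctrl/intel/pinctrl-intel.h for the full definition. */
	struct intel_pinctrl {
		struct device *dev;
		struct pinctrl_desc pctldesc;
		struct pinctrl_dev *pctldev;
		struct gpio_chip chip;
		struct irq_chip irqchip;
		const struct intel_pinctrl_soc_data *soc;
		struct intel_community *communities;
		size_t ncommunities;
		struct intel_pinctrl_context context;
		/* ... remaining fields omitted ... */
	};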
/* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
static const struct pinctrl_pin_desc byt_score_pins[] = {
PINCTRL_PIN(0, "SATA_GP0"),
@@ -550,14 +540,14 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
static DEFINE_RAW_SPINLOCK(byt_lock);
-static struct intel_community *byt_get_community(struct byt_gpio *vg,
+static struct intel_community *byt_get_community(struct intel_pinctrl *vg,
unsigned int pin)
{
struct intel_community *comm;
int i;
- for (i = 0; i < vg->soc_data->ncommunities; i++) {
- comm = vg->communities_copy + i;
+ for (i = 0; i < vg->ncommunities; i++) {
+ comm = vg->communities + i;
if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
return comm;
}
@@ -565,7 +555,7 @@ static struct intel_community *byt_get_community(struct byt_gpio *vg,
return NULL;
}
-static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
int reg)
{
struct intel_community *comm = byt_get_community(vg, offset);
@@ -592,17 +582,17 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
static int byt_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->ngroups;
+ return vg->soc->ngroups;
}
static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->groups[selector].name;
+ return vg->soc->groups[selector].name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -610,10 +600,10 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
const unsigned int **pins,
unsigned int *num_pins)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc_data->groups[selector].pins;
- *num_pins = vg->soc_data->groups[selector].npins;
+ *pins = vg->soc->groups[selector].pins;
+ *num_pins = vg->soc->groups[selector].npins;
return 0;
}
@@ -626,17 +616,17 @@ static const struct pinctrl_ops byt_pinctrl_ops = {
static int byt_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->nfunctions;
+ return vg->soc->nfunctions;
}
static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->functions[selector].name;
+ return vg->soc->functions[selector].name;
}
static int byt_get_function_groups(struct pinctrl_dev *pctldev,
@@ -644,15 +634,15 @@ static int byt_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int *num_groups)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *groups = vg->soc_data->functions[selector].groups;
- *num_groups = vg->soc_data->functions[selector].ngroups;
+ *groups = vg->soc->functions[selector].groups;
+ *num_groups = vg->soc->functions[selector].ngroups;
return 0;
}
-static void byt_set_group_simple_mux(struct byt_gpio *vg,
+static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
unsigned int func)
{
@@ -667,7 +657,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -682,7 +672,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
-static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
const unsigned int *func)
{
@@ -697,7 +687,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -715,9 +705,9 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
unsigned int group_selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
- const struct intel_function func = vg->soc_data->functions[func_selector];
- const struct intel_pingroup group = vg->soc_data->groups[group_selector];
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_function func = vg->soc->functions[func_selector];
+ const struct intel_pingroup group = vg->soc->groups[group_selector];
if (group.modes)
byt_set_group_mixed_mux(vg, group, group.modes);
@@ -729,22 +719,22 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
return 0;
}
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned int offset)
+static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
{
/* SCORE pin 92-93 */
- if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SCORE_ACPI_UID) &&
offset >= 92 && offset <= 93)
return BYT_ALTER_GPIO_MUX;
/* SUS pin 11-21 */
- if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SUS_ACPI_UID) &&
offset >= 11 && offset <= 21)
return BYT_ALTER_GPIO_MUX;
return BYT_DEFAULT_GPIO_MUX;
}
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
{
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
@@ -752,7 +742,13 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
+ /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
+ if (value & BYT_DIRECT_IRQ_EN)
+ /* nothing to do */ ;
+ else
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
writel(value, reg);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
@@ -761,7 +757,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
unsigned long flags;
@@ -784,13 +780,12 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
value |= gpio_mux;
writel(value, reg);
- dev_warn(&vg->pdev->dev, FW_BUG
- "pin %u forcibly re-configured as GPIO\n", offset);
+ dev_warn(vg->dev, FW_BUG "pin %u forcibly re-configured as GPIO\n", offset);
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
- pm_runtime_get(&vg->pdev->dev);
+ pm_runtime_get(vg->dev);
return 0;
}
@@ -799,10 +794,10 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
byt_gpio_clear_triggering(vg, offset);
- pm_runtime_put(&vg->pdev->dev);
+ pm_runtime_put(vg->dev);
}
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
@@ -810,7 +805,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned int offset,
bool input)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
@@ -822,15 +817,15 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
value &= ~BYT_DIR_MASK;
if (input)
value |= BYT_OUTPUT_EN;
- else
+ else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
/*
* Before making any direction modifications, do a check if gpio
* is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least warn the caller before
+ * does not make sense, so let's at least inform the caller before
* they shoot themselves in the foot.
*/
- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
- "Potential Error: Setting GPIO with direct_irq_en to output");
+ dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+
writel(value, val_reg);
raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -893,7 +888,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
unsigned long *config)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -978,7 +973,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned long *configs,
unsigned int num_configs)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -1012,7 +1007,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1034,7 +1029,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1115,7 +1110,7 @@ static const struct pinctrl_desc byt_pinctrl_desc = {
static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 val;
@@ -1129,7 +1124,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 old_val;
@@ -1148,7 +1143,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 value;
@@ -1161,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (!(value & BYT_INPUT_EN))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
return -EINVAL;
}
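The raw 0/1 returns are replaced with the then-new GPIO_LINE_DIRECTION_* constants from linux/gpio/driver.h; since GPIO_LINE_DIRECTION_OUT is defined as 0 and GPIO_LINE_DIRECTION_IN as 1, the returned values are unchanged and this is purely a readability conversion:

	/* From include/linux/gpio/driver.h: */
	#define GPIO_LINE_DIRECTION_IN	1
	#define GPIO_LINE_DIRECTION_OUT	0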
@@ -1188,11 +1183,11 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
int i;
u32 conf0, val;
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
const struct intel_community *comm;
const char *pull_str = NULL;
const char *pull = NULL;
@@ -1202,7 +1197,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
unsigned int pin;
raw_spin_lock_irqsave(&byt_lock, flags);
- pin = vg->soc_data->pins[i].number;
+ pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
@@ -1297,7 +1292,7 @@ static const struct gpio_chip byt_gpio_chip = {
static void byt_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
void __iomem *reg;
@@ -1313,7 +1308,7 @@ static void byt_irq_ack(struct irq_data *d)
static void byt_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
}
@@ -1321,7 +1316,7 @@ static void byt_irq_mask(struct irq_data *d)
static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *reg;
@@ -1359,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d)
static int byt_irq_type(struct irq_data *d, unsigned int type)
{
- struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
u32 offset = irqd_to_hwirq(d);
u32 value;
unsigned long flags;
@@ -1395,20 +1390,10 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip byt_irqchip = {
- .name = "BYT-GPIO",
- .irq_ack = byt_irq_ack,
- .irq_mask = byt_irq_mask,
- .irq_unmask = byt_irq_unmask,
- .irq_set_type = byt_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static void byt_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct byt_gpio *vg = gpiochip_get_data(
- irq_desc_get_handler_data(desc));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin;
void __iomem *reg;
@@ -1420,7 +1405,7 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve interrupt status register\n",
base);
continue;
@@ -1441,22 +1426,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
- /*
- * FIXME: currently the valid_mask is filled in as part of
- * initializing the irq_chip below in byt_gpio_irq_init_hw().
- * when converting this driver to the new way of passing the
- * gpio_irq_chip along when adding the gpio_chip, move the
- * mask initialization into this callback instead. Right now
- * this callback is here to make sure the mask gets allocated.
- */
-}
-
-static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
-{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- struct device *dev = &vg->pdev->dev;
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg;
- u32 base, value;
+ u32 value;
int i;
/*
@@ -1464,12 +1436,12 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
* do not use direct IRQ mode. This will prevent spurious
* interrupts from misconfigured pins.
*/
- for (i = 0; i < vg->soc_data->npins; i++) {
- unsigned int pin = vg->soc_data->pins[i].number;
+ for (i = 0; i < vg->soc->npins; i++) {
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
@@ -1477,20 +1449,27 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
value = readl(reg);
if (value & BYT_DIRECT_IRQ_EN) {
- clear_bit(i, chip->irq.valid_mask);
- dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ clear_bit(i, valid_mask);
+ dev_dbg(vg->dev, "excluding GPIO %d from IRQ domain\n", i);
} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(dev, "disabling GPIO %d\n", i);
+ dev_dbg(vg->dev, "disabling GPIO %d\n", i);
}
}
+}
+
+static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ u32 base, value;
/* clear interrupt status trigger registers */
- for (base = 0; base < vg->soc_data->npins; base += 32) {
+ for (base = 0; base < vg->soc->npins; base += 32) {
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve irq status reg\n",
base);
continue;
@@ -1501,7 +1480,7 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
might be misconfigured in bios */
value = readl(reg);
if (value)
- dev_err(&vg->pdev->dev,
+ dev_err(vg->dev,
"GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
base / 32, value);
}
@@ -1511,19 +1490,20 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
static int byt_gpio_add_pin_ranges(struct gpio_chip *chip)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- struct device *dev = &vg->pdev->dev;
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ struct device *dev = vg->dev;
int ret;
- ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins);
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc->npins);
if (ret)
dev_err(dev, "failed to add GPIO pin range\n");
return ret;
}
-static int byt_gpio_probe(struct byt_gpio *vg)
+static int byt_gpio_probe(struct intel_pinctrl *vg)
{
+ struct platform_device *pdev = to_platform_device(vg->dev);
struct gpio_chip *gc;
struct resource *irq_rc;
int ret;
@@ -1531,32 +1511,39 @@ static int byt_gpio_probe(struct byt_gpio *vg)
/* Set up gpio chip */
vg->chip = byt_gpio_chip;
gc = &vg->chip;
- gc->label = dev_name(&vg->pdev->dev);
+ gc->label = dev_name(vg->dev);
gc->base = -1;
gc->can_sleep = false;
gc->add_pin_ranges = byt_gpio_add_pin_ranges;
- gc->parent = &vg->pdev->dev;
- gc->ngpio = vg->soc_data->npins;
- gc->irq.init_valid_mask = byt_init_irq_valid_mask;
+ gc->parent = vg->dev;
+ gc->ngpio = vg->soc->npins;
#ifdef CONFIG_PM_SLEEP
- vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
- sizeof(*vg->saved_context), GFP_KERNEL);
- if (!vg->saved_context)
+ vg->context.pads = devm_kcalloc(vg->dev, gc->ngpio, sizeof(*vg->context.pads),
+ GFP_KERNEL);
+ if (!vg->context.pads)
return -ENOMEM;
#endif
/* set up interrupts */
- irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_rc && irq_rc->start) {
struct gpio_irq_chip *girq;
+ vg->irqchip.name = "BYT-GPIO";
+ vg->irqchip.irq_ack = byt_irq_ack;
+ vg->irqchip.irq_mask = byt_irq_mask;
+ vg->irqchip.irq_unmask = byt_irq_unmask;
+ vg->irqchip.irq_set_type = byt_irq_type;
+ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+
girq = &gc->irq;
- girq->chip = &byt_irqchip;
+ girq->chip = &vg->irqchip;
girq->init_hw = byt_gpio_irq_init_hw;
+ girq->init_valid_mask = byt_init_irq_valid_mask;
girq->parent_handler = byt_gpio_irq_handler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents,
+ girq->parents = devm_kcalloc(vg->dev, girq->num_parents,
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
@@ -1565,34 +1552,35 @@ static int byt_gpio_probe(struct byt_gpio *vg)
girq->handler = handle_bad_irq;
}
- ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
+ ret = devm_gpiochip_add_data(vg->dev, gc, vg);
if (ret) {
- dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
+ dev_err(vg->dev, "failed adding byt-gpio chip\n");
return ret;
}
return ret;
}
-static int byt_set_soc_data(struct byt_gpio *vg,
- const struct intel_pinctrl_soc_data *soc_data)
+static int byt_set_soc_data(struct intel_pinctrl *vg,
+ const struct intel_pinctrl_soc_data *soc)
{
+ struct platform_device *pdev = to_platform_device(vg->dev);
int i;
- vg->soc_data = soc_data;
- vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
- soc_data->ncommunities,
- sizeof(*vg->communities_copy),
- GFP_KERNEL);
- if (!vg->communities_copy)
+ vg->soc = soc;
+
+ vg->ncommunities = vg->soc->ncommunities;
+ vg->communities = devm_kcalloc(vg->dev, vg->ncommunities,
+ sizeof(*vg->communities), GFP_KERNEL);
+ if (!vg->communities)
return -ENOMEM;
- for (i = 0; i < soc_data->ncommunities; i++) {
- struct intel_community *comm = vg->communities_copy + i;
+ for (i = 0; i < vg->soc->ncommunities; i++) {
+ struct intel_community *comm = vg->communities + i;
- *comm = vg->soc_data->communities[i];
+ *comm = vg->soc->communities[i];
- comm->pad_regs = devm_platform_ioremap_resource(vg->pdev, 0);
+ comm->pad_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->pad_regs))
return PTR_ERR(comm->pad_regs);
}
@@ -1610,15 +1598,16 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data = NULL;
const struct intel_pinctrl_soc_data **soc_table;
+ struct device *dev = &pdev->dev;
struct acpi_device *acpi_dev;
- struct byt_gpio *vg;
+ struct intel_pinctrl *vg;
int i, ret;
- acpi_dev = ACPI_COMPANION(&pdev->dev);
+ acpi_dev = ACPI_COMPANION(dev);
if (!acpi_dev)
return -ENODEV;
- soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(&pdev->dev);
+ soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
for (i = 0; soc_table[i]; i++) {
if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
@@ -1630,26 +1619,26 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
if (!soc_data)
return -ENODEV;
- vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+ vg = devm_kzalloc(dev, sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
- vg->pdev = pdev;
+ vg->dev = dev;
ret = byt_set_soc_data(vg, soc_data);
if (ret) {
- dev_err(&pdev->dev, "failed to set soc data\n");
+ dev_err(dev, "failed to set soc data\n");
return ret;
}
- vg->pctl_desc = byt_pinctrl_desc;
- vg->pctl_desc.name = dev_name(&pdev->dev);
- vg->pctl_desc.pins = vg->soc_data->pins;
- vg->pctl_desc.npins = vg->soc_data->npins;
+ vg->pctldesc = byt_pinctrl_desc;
+ vg->pctldesc.name = dev_name(dev);
+ vg->pctldesc.pins = vg->soc->pins;
+ vg->pctldesc.npins = vg->soc->npins;
- vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
- if (IS_ERR(vg->pctl_dev)) {
- dev_err(&pdev->dev, "failed to register pinctrl driver\n");
- return PTR_ERR(vg->pctl_dev);
+ vg->pctldev = devm_pinctrl_register(dev, &vg->pctldesc, vg);
+ if (IS_ERR(vg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(vg->pctldev);
}
ret = byt_gpio_probe(vg);
@@ -1657,7 +1646,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, vg);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
return 0;
}
@@ -1665,30 +1654,30 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int byt_gpio_suspend(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg) & BYT_CONF0_RESTORE_MASK;
- vg->saved_context[i].conf0 = value;
+ vg->context.pads[i].conf0 = value;
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg) & BYT_VAL_RESTORE_MASK;
- vg->saved_context[i].val = value;
+ vg->context.pads[i].val = value;
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -1697,29 +1686,29 @@ static int byt_gpio_suspend(struct device *dev)
static int byt_gpio_resume(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg);
if ((value & BYT_CONF0_RESTORE_MASK) !=
- vg->saved_context[i].conf0) {
+ vg->context.pads[i].conf0) {
value &= ~BYT_CONF0_RESTORE_MASK;
- value |= vg->saved_context[i].conf0;
+ value |= vg->context.pads[i].conf0;
writel(value, reg);
dev_info(dev, "restored pin %d conf0 %#08x", i, value);
}
@@ -1727,11 +1716,11 @@ static int byt_gpio_resume(struct device *dev)
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg);
if ((value & BYT_VAL_RESTORE_MASK) !=
- vg->saved_context[i].val) {
+ vg->context.pads[i].val) {
u32 v;
v = value & ~BYT_VAL_RESTORE_MASK;
- v |= vg->saved_context[i].val;
+ v |= vg->context.pads[i].val;
if (v != value) {
writel(v, reg);
dev_dbg(dev, "restored pin %d val %#08x\n",
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 60527b93a711..4c74fdde576d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1289,7 +1289,10 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
- return direction != CHV_PADCTRL0_GPIOCFG_GPO;
+ if (direction == CHV_PADCTRL0_GPIOCFG_GPO)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 4860bc9a4e48..74fdfd2b9ff5 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -8,8 +8,8 @@
*/
#include <linux/acpi.h>
-#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,39 +85,6 @@ struct intel_community_context {
u32 *hostown;
};
-struct intel_pinctrl_context {
- struct intel_pad_context *pads;
- struct intel_community_context *communities;
-};
-
-/**
- * struct intel_pinctrl - Intel pinctrl private structure
- * @dev: Pointer to the device structure
- * @lock: Lock to serialize register access
- * @pctldesc: Pin controller description
- * @pctldev: Pointer to the pin controller device
- * @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
- * @soc: SoC/PCH specific pin configuration data
- * @communities: All communities in this pin controller
- * @ncommunities: Number of communities in this pin controller
- * @context: Configuration saved over system sleep
- * @irq: pinctrl/GPIO chip irq number
- */
-struct intel_pinctrl {
- struct device *dev;
- raw_spinlock_t lock;
- struct pinctrl_desc pctldesc;
- struct pinctrl_dev *pctldev;
- struct gpio_chip chip;
- struct irq_chip irqchip;
- const struct intel_pinctrl_soc_data *soc;
- struct intel_community *communities;
- size_t ncommunities;
- struct intel_pinctrl_context context;
- int irq;
-};
-
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
#define padgroup_offset(g, p) ((p) - (g)->base)
@@ -944,7 +911,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
- return !!(padcfg0 & PADCFG0_GPIOTXDIS);
+ if (padcfg0 & PADCFG0_GPIOTXDIS)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1160,8 +1130,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return ret;
}
-static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
+static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
+ const struct intel_community *community)
{
int ret = 0, i;
@@ -1181,6 +1151,24 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
+static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int ret, i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+
+ ret = intel_gpio_add_community_ranges(pctrl, community);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
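+
+/*
+ * The GPIO core invokes ->add_pin_ranges() while the chip is being
+ * registered, so the pinctrl ranges are in place before any pin can be
+ * requested.
+ */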
+
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
@@ -1205,7 +1193,8 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
- int ret, i;
+ int ret;
+ struct gpio_irq_chip *girq;
pctrl->chip = intel_gpio_chip;
@@ -1214,6 +1203,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
+ pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
pctrl->irq = irq;
/* Setup IRQ chip */
@@ -1225,26 +1215,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
- ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to register gpiochip\n");
- return ret;
- }
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- struct intel_community *community = &pctrl->communities[i];
-
- ret = intel_gpio_add_pin_ranges(pctrl, community);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- return ret;
- }
- }
-
/*
- * We need to request the interrupt here (instead of providing chip
- * to the irq directly) because on some platforms several GPIO
- * controllers share the same interrupt line.
+ * On some platforms several GPIO controllers share the same interrupt
+ * line.
*/
ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
IRQF_SHARED | IRQF_NO_THREAD,
@@ -1254,14 +1227,20 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
+ girq = &pctrl->chip.irq;
+ girq->chip = &pctrl->irqchip;
+ /* This will let us handle the IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "failed to add irqchip\n");
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 34b38a321760..c6f066f6d3fb 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,7 +10,10 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/spinlock_types.h>
struct pinctrl_pin_desc;
struct platform_device;
@@ -174,6 +177,47 @@ struct intel_pinctrl_soc_data {
size_t ncommunities;
};
+struct intel_pad_context;
+struct intel_community_context;
+
+/**
+ * struct intel_pinctrl_context - context to be saved during suspend-resume
+ * @pads: Opaque context per pad (driver dependent)
+ * @communities: Opaque context per community (driver dependent)
+ */
+struct intel_pinctrl_context {
+ struct intel_pad_context *pads;
+ struct intel_community_context *communities;
+};
+
+/**
+ * struct intel_pinctrl - Intel pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
+ * @soc: SoC/PCH specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
+ * @irq: pinctrl/GPIO chip irq number
+ */
+struct intel_pinctrl {
+ struct device *dev;
+ raw_spinlock_t lock;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ struct irq_chip irqchip;
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_community *communities;
+ size_t ncommunities;
+ struct intel_pinctrl_context context;
+ int irq;
+};
+
int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
new file mode 100644
index 000000000000..e928742c7181
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lynxpoint PCH pinctrl/GPIO driver
+ *
+ * Copyright (c) 2012, 2019, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-intel.h"
+
+#define COMMUNITY(p, n) \
+ { \
+ .pin_base = (p), \
+ .npins = (n), \
+ }
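+
+/*
+ * Note: COMMUNITY() fills in only the pin range; each community's
+ * register base is assigned at probe time from the ACPI I/O resource.
+ */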
+
+static const struct pinctrl_pin_desc lptlp_pins[] = {
+ PINCTRL_PIN(0, "GP0_UART1_RXD"),
+ PINCTRL_PIN(1, "GP1_UART1_TXD"),
+ PINCTRL_PIN(2, "GP2_UART1_RTSB"),
+ PINCTRL_PIN(3, "GP3_UART1_CTSB"),
+ PINCTRL_PIN(4, "GP4_I2C0_SDA"),
+ PINCTRL_PIN(5, "GP5_I2C0_SCL"),
+ PINCTRL_PIN(6, "GP6_I2C1_SDA"),
+ PINCTRL_PIN(7, "GP7_I2C1_SCL"),
+ PINCTRL_PIN(8, "GP8"),
+ PINCTRL_PIN(9, "GP9"),
+ PINCTRL_PIN(10, "GP10"),
+ PINCTRL_PIN(11, "GP11_SMBALERTB"),
+ PINCTRL_PIN(12, "GP12_LANPHYPC"),
+ PINCTRL_PIN(13, "GP13"),
+ PINCTRL_PIN(14, "GP14"),
+ PINCTRL_PIN(15, "GP15"),
+ PINCTRL_PIN(16, "GP16_MGPIO9"),
+ PINCTRL_PIN(17, "GP17_MGPIO10"),
+ PINCTRL_PIN(18, "GP18_SRC0CLKRQB"),
+ PINCTRL_PIN(19, "GP19_SRC1CLKRQB"),
+ PINCTRL_PIN(20, "GP20_SRC2CLKRQB"),
+ PINCTRL_PIN(21, "GP21_SRC3CLKRQB"),
+ PINCTRL_PIN(22, "GP22_SRC4CLKRQB_TRST2"),
+ PINCTRL_PIN(23, "GP23_SRC5CLKRQB_TDI2"),
+ PINCTRL_PIN(24, "GP24_MGPIO0"),
+ PINCTRL_PIN(25, "GP25_USBWAKEOUTB"),
+ PINCTRL_PIN(26, "GP26_MGPIO5"),
+ PINCTRL_PIN(27, "GP27_MGPIO6"),
+ PINCTRL_PIN(28, "GP28_MGPIO7"),
+ PINCTRL_PIN(29, "GP29_SLP_WLANB_MGPIO3"),
+ PINCTRL_PIN(30, "GP30_SUSWARNB_SUSPWRDNACK_MGPIO1"),
+ PINCTRL_PIN(31, "GP31_ACPRESENT_MGPIO2"),
+ PINCTRL_PIN(32, "GP32_CLKRUNB"),
+ PINCTRL_PIN(33, "GP33_DEVSLP0"),
+ PINCTRL_PIN(34, "GP34_SATA0XPCIE6L3B_SATA0GP"),
+ PINCTRL_PIN(35, "GP35_SATA1XPCIE6L2B_SATA1GP"),
+ PINCTRL_PIN(36, "GP36_SATA2XPCIE6L1B_SATA2GP"),
+ PINCTRL_PIN(37, "GP37_SATA3XPCIE6L0B_SATA3GP"),
+ PINCTRL_PIN(38, "GP38_DEVSLP1"),
+ PINCTRL_PIN(39, "GP39_DEVSLP2"),
+ PINCTRL_PIN(40, "GP40_OC0B"),
+ PINCTRL_PIN(41, "GP41_OC1B"),
+ PINCTRL_PIN(42, "GP42_OC2B"),
+ PINCTRL_PIN(43, "GP43_OC3B"),
+ PINCTRL_PIN(44, "GP44"),
+ PINCTRL_PIN(45, "GP45_TMS2"),
+ PINCTRL_PIN(46, "GP46_TDO2"),
+ PINCTRL_PIN(47, "GP47"),
+ PINCTRL_PIN(48, "GP48"),
+ PINCTRL_PIN(49, "GP49"),
+ PINCTRL_PIN(50, "GP50"),
+ PINCTRL_PIN(51, "GP51_GSXDOUT"),
+ PINCTRL_PIN(52, "GP52_GSXSLOAD"),
+ PINCTRL_PIN(53, "GP53_GSXDIN"),
+ PINCTRL_PIN(54, "GP54_GSXSRESETB"),
+ PINCTRL_PIN(55, "GP55_GSXCLK"),
+ PINCTRL_PIN(56, "GP56"),
+ PINCTRL_PIN(57, "GP57"),
+ PINCTRL_PIN(58, "GP58"),
+ PINCTRL_PIN(59, "GP59"),
+ PINCTRL_PIN(60, "GP60_SML0ALERTB_MGPIO4"),
+ PINCTRL_PIN(61, "GP61_SUS_STATB"),
+ PINCTRL_PIN(62, "GP62_SUSCLK"),
+ PINCTRL_PIN(63, "GP63_SLP_S5B"),
+ PINCTRL_PIN(64, "GP64_SDIO_CLK"),
+ PINCTRL_PIN(65, "GP65_SDIO_CMD"),
+ PINCTRL_PIN(66, "GP66_SDIO_D0"),
+ PINCTRL_PIN(67, "GP67_SDIO_D1"),
+ PINCTRL_PIN(68, "GP68_SDIO_D2"),
+ PINCTRL_PIN(69, "GP69_SDIO_D3"),
+ PINCTRL_PIN(70, "GP70_SDIO_POWER_EN"),
+ PINCTRL_PIN(71, "GP71_MPHYPC"),
+ PINCTRL_PIN(72, "GP72_BATLOWB"),
+ PINCTRL_PIN(73, "GP73_SML1ALERTB_PCHHOTB_MGPIO8"),
+ PINCTRL_PIN(74, "GP74_SML1DATA_MGPIO12"),
+ PINCTRL_PIN(75, "GP75_SML1CLK_MGPIO11"),
+ PINCTRL_PIN(76, "GP76_BMBUSYB"),
+ PINCTRL_PIN(77, "GP77_PIRQAB"),
+ PINCTRL_PIN(78, "GP78_PIRQBB"),
+ PINCTRL_PIN(79, "GP79_PIRQCB"),
+ PINCTRL_PIN(80, "GP80_PIRQDB"),
+ PINCTRL_PIN(81, "GP81_SPKR"),
+ PINCTRL_PIN(82, "GP82_RCINB"),
+ PINCTRL_PIN(83, "GP83_GSPI0_CSB"),
+ PINCTRL_PIN(84, "GP84_GSPI0_CLK"),
+ PINCTRL_PIN(85, "GP85_GSPI0_MISO"),
+ PINCTRL_PIN(86, "GP86_GSPI0_MOSI"),
+ PINCTRL_PIN(87, "GP87_GSPI1_CSB"),
+ PINCTRL_PIN(88, "GP88_GSPI1_CLK"),
+ PINCTRL_PIN(89, "GP89_GSPI1_MISO"),
+ PINCTRL_PIN(90, "GP90_GSPI1_MOSI"),
+ PINCTRL_PIN(91, "GP91_UART0_RXD"),
+ PINCTRL_PIN(92, "GP92_UART0_TXD"),
+ PINCTRL_PIN(93, "GP93_UART0_RTSB"),
+ PINCTRL_PIN(94, "GP94_UART0_CTSB"),
+};
+
+static const struct intel_community lptlp_communities[] = {
+ COMMUNITY(0, 95),
+};
+
+static const struct intel_pinctrl_soc_data lptlp_soc_data = {
+ .pins = lptlp_pins,
+ .npins = ARRAY_SIZE(lptlp_pins),
+ .communities = lptlp_communities,
+ .ncommunities = ARRAY_SIZE(lptlp_communities),
+};
+
+/* LynxPoint chipset has support for 95 GPIO pins */
+
+#define LP_NUM_GPIO 95
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
+#define LP_IRQ2IOXAPIC 0x10 /* Bitmap, set by bios, 1: pin routed to IOxAPIC */
+#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT 0x80
+#define LP_INT_ENABLE 0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1 0x100
+#define LP_CONFIG2 0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT BIT(31)
+#define IN_LVL_BIT BIT(30)
+#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_MASK GENMASK(1, 0) /* 0: Native, 1: GPIO, ... */
+#define USE_SEL_NATIVE (0 << 0)
+#define USE_SEL_GPIO (1 << 0)
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT BIT(2) /* disable input sensing */
+#define GPIWP_MASK GENMASK(1, 0) /* weak pull options */
+#define GPIWP_NONE 0 /* none */
+#define GPIWP_DOWN 1 /* weak pull down */
+#define GPIWP_UP 2 /* weak pull up */
+
+/*
+ * Lynxpoint gpios are controlled through both bitmapped registers and
+ * per-gpio specific registers. The bitmapped registers are in chunks of
+ * 3 x 32-bit registers to cover all 95 GPIOs.
+ *
+ * The per-gpio specific registers consist of two 32-bit registers per
+ * gpio (LP_CONFIG1 and LP_CONFIG2); with 95 GPIOs there is a total of
+ * 190 config registers.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 64-94
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP_INT_ENABLE[94:64] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ *
+ * IOxAPIC redirection map applies only for gpio 8-10, 13-14, 45-55.
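+ *
+ * An illustrative example derived from the layout above: GPIO 37 has
+ * its LP_CONFIG1 register at 0x100 + 37 * 8 = 0x228, and its interrupt
+ * status bit in the second LP_INT_STAT word, 0x80 + (37 / 32) * 4 =
+ * 0x84, bit 37 % 32 = 5.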
+ */
+
+static struct intel_community *lp_get_community(struct intel_pinctrl *lg,
+ unsigned int pin)
+{
+ struct intel_community *comm;
+ int i;
+
+ for (i = 0; i < lg->ncommunities; i++) {
+ comm = &lg->communities[i];
+ if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+ return comm;
+ }
+
+ return NULL;
+}
+
+static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
+ int reg)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct intel_community *comm;
+ int reg_offset;
+
+ comm = lp_get_community(lg, offset);
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
+
+ if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+ /* per gpio specific config registers */
+ reg_offset = offset * 8;
+ else
+ /* bitmapped registers */
+ reg_offset = (offset / 32) * 4;
+
+ return comm->regs + reg_offset + reg;
+}
+
+static bool lp_gpio_acpi_use(struct intel_pinctrl *lg, unsigned int pin)
+{
+ void __iomem *acpi_use;
+
+ acpi_use = lp_gpio_reg(&lg->chip, pin, LP_ACPI_OWNED);
+ if (!acpi_use)
+ return true;
+
+ return !(ioread32(acpi_use) & BIT(pin % 32));
+}
+
+static bool lp_gpio_ioxapic_use(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *ioxapic_use = lp_gpio_reg(chip, offset, LP_IRQ2IOXAPIC);
+ u32 value;
+
+ value = ioread32(ioxapic_use);
+
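+ /*
+ * The routing bits are packed back to back: gpios 8-10 map to bits
+ * 0-2, gpios 13-14 to bits 3-4, and gpios 45-55 to bits 5-15 of the
+ * LP_IRQ2IOXAPIC bitmap.
+ */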
+ if (offset >= 8 && offset <= 10)
+ return !!(value & BIT(offset - 8 + 0));
+ if (offset >= 13 && offset <= 14)
+ return !!(value & BIT(offset - 13 + 3));
+ if (offset >= 45 && offset <= 55)
+ return !!(value & BIT(offset - 45 + 5));
+
+ return false;
+}
+
+static int lp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->ngroups;
+}
+
+static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->groups[selector].name;
+}
+
+static int lp_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = lg->soc->groups[selector].pins;
+ *num_pins = lg->soc->groups[selector].npins;
+
+ return 0;
+}
+
+static void lp_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ u32 value, mode;
+
+ value = ioread32(reg);
+
+ mode = value & USE_SEL_MASK;
+ if (mode == USE_SEL_GPIO)
+ seq_puts(s, "GPIO ");
+ else
+ seq_printf(s, "mode %d ", mode);
+
+ seq_printf(s, "0x%08x 0x%08x", value, ioread32(conf2));
+
+ if (lp_gpio_acpi_use(lg, pin))
+ seq_puts(s, " [ACPI]");
+}
+
+static const struct pinctrl_ops lptlp_pinctrl_ops = {
+ .get_groups_count = lp_get_groups_count,
+ .get_group_name = lp_get_group_name,
+ .get_group_pins = lp_get_group_pins,
+ .pin_dbg_show = lp_pin_dbg_show,
+};
+
+static int lp_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->nfunctions;
+}
+
+static const char *lp_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->functions[selector].name;
+}
+
+static int lp_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = lg->soc->functions[selector].groups;
+ *num_groups = lg->soc->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pingroup *grp = &lg->soc->groups[group];
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Now enable the mux setting for each pin in the group */
+ for (i = 0; i < grp->npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ u32 value;
+
+ value = ioread32(reg);
+
+ value &= ~USE_SEL_MASK;
+ if (grp->modes)
+ value |= grp->modes[i];
+ else
+ value |= grp->mode;
+
+ iowrite32(value, reg);
+ }
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+ u32 value;
+
+ pm_runtime_get(lg->dev);
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /*
+ * Reconfigure pin to GPIO mode if needed and issue a warning,
+ * since we expect firmware to configure it properly.
+ */
+ value = ioread32(reg);
+ if ((value & USE_SEL_MASK) != USE_SEL_GPIO) {
+ iowrite32((value & ~USE_SEL_MASK) | USE_SEL_GPIO, reg);
+ dev_warn(lg->dev, FW_BUG "pin %u forcibly reconfigured as GPIO\n", pin);
+ }
+
+ /* Enable input sensing */
+ iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Disable input sensing */
+ iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pm_runtime_put(lg->dev);
+}
+
+static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(reg);
+ value &= ~DIR_BIT;
+ if (input) {
+ value |= DIR_BIT;
+ } else {
+ /*
+ * Before making any direction modifications, do a check if GPIO
+ * is set for direct IRQ. On Lynxpoint, setting GPIO to output
+ * does not make sense, so let's at least warn the caller before
+ * they shoot themselves in the foot.
+ */
+ WARN(lp_gpio_ioxapic_use(&lg->chip, pin),
+ "Potential Error: Setting GPIO to output with IOxAPIC redirection");
+ }
+ iowrite32(value, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops lptlp_pinmux_ops = {
+ .get_functions_count = lp_get_functions_count,
+ .get_function_name = lp_get_function_name,
+ .get_function_groups = lp_get_function_groups,
+ .set_mux = lp_pinmux_set_mux,
+ .gpio_request_enable = lp_gpio_request_enable,
+ .gpio_disable_free = lp_gpio_disable_free,
+ .gpio_set_direction = lp_gpio_set_direction,
+};
+
+static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned long flags;
+ u32 value, pull;
+ u16 arg = 0;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(conf2);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pull = value & GPIWP_MASK;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull != GPIWP_DOWN)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull != GPIWP_UP)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param;
+ unsigned long flags;
+ int i, ret = 0;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(conf2);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value &= ~GPIWP_MASK;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_UP;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ iowrite32(value, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return ret;
+}
+
+static const struct pinconf_ops lptlp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = lp_pin_config_get,
+ .pin_config_set = lp_pin_config_set,
+};
+
+static const struct pinctrl_desc lptlp_pinctrl_desc = {
+ .pctlops = &lptlp_pinctrl_ops,
+ .pmxops = &lptlp_pinmux_ops,
+ .confops = &lptlp_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ return !!(ioread32(reg) & IN_LVL_BIT);
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ if (value)
+ iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
+ else
+ iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ lp_gpio_set(chip, offset, value);
+
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ if (ioread32(reg) & DIR_BIT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void lp_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ void __iomem *reg, *ena;
+ unsigned long pending;
+ u32 base, pin;
+
+ /* Check which pin of the GPIO controller triggered the interrupt */
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+
+ /* Only interrupts that are enabled */
+ pending = ioread32(reg) & ioread32(ena);
+
+ for_each_set_bit(pin, &pending, 32) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
+ generic_handle_irq(irq);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
+static void lp_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
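+/*
+ * lp_irq_unmask() and lp_irq_mask() are deliberately empty: interrupt
+ * delivery is gated through the LP_INT_ENABLE bits, which lp_irq_enable()
+ * and lp_irq_disable() toggle.
+ */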
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
+
+static void lp_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ if (hwirq >= lg->chip.ngpio)
+ return -EINVAL;
+
+ /* Fail if BIOS reserved pin for ACPI use */
+ if (lp_gpio_acpi_use(lg, hwirq)) {
+ dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq);
+ return -EBUSY;
+ }
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(reg);
+
+ /* set both TRIG_SEL and INV bits to 0 for rising edge */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+ /* TRIG_SEL bit 0, INV bit 1 for falling edge */
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 0 for level low */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 1 for level high */
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+ iowrite32(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip lp_irqchip = {
+ .name = "LP-GPIO",
+ .irq_ack = lp_irq_ack,
+ .irq_mask = lp_irq_mask,
+ .irq_unmask = lp_irq_unmask,
+ .irq_enable = lp_irq_enable,
+ .irq_disable = lp_irq_disable,
+ .irq_set_type = lp_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ unsigned int base;
+
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ /* disable gpio pin interrupts */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+ iowrite32(0, reg);
+ /* Clear interrupt status register */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ iowrite32(0xffffffff, reg);
+ }
+
+ return 0;
+}
+
+static int lp_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct device *dev = lg->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, lg->soc->npins);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_pinctrl *lg;
+ struct gpio_chip *gc;
+ struct resource *io_rc, *irq_rc;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ unsigned int i;
+ int ret;
+
+ soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
+ if (!soc)
+ return -ENODEV;
+
+ lg = devm_kzalloc(dev, sizeof(*lg), GFP_KERNEL);
+ if (!lg)
+ return -ENOMEM;
+
+ lg->dev = dev;
+ lg->soc = soc;
+
+ lg->ncommunities = lg->soc->ncommunities;
+ lg->communities = devm_kcalloc(dev, lg->ncommunities,
+ sizeof(*lg->communities), GFP_KERNEL);
+ if (!lg->communities)
+ return -ENOMEM;
+
+ lg->pctldesc = lptlp_pinctrl_desc;
+ lg->pctldesc.name = dev_name(dev);
+ lg->pctldesc.pins = lg->soc->pins;
+ lg->pctldesc.npins = lg->soc->npins;
+
+ lg->pctldev = devm_pinctrl_register(dev, &lg->pctldesc, lg);
+ if (IS_ERR(lg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(lg->pctldev);
+ }
+
+ platform_set_drvdata(pdev, lg);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ regs = devm_ioport_map(dev, io_rc->start, resource_size(io_rc));
+ if (!regs) {
+ dev_err(dev, "failed mapping IO region %pR\n", &io_rc);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < lg->soc->ncommunities; i++) {
+ struct intel_community *comm = &lg->communities[i];
+
+ *comm = lg->soc->communities[i];
+
+ comm->regs = regs;
+ comm->pad_regs = regs + 0x100;
+ }
+
+ raw_spin_lock_init(&lg->lock);
+
+ gc = &lg->chip;
+ gc->label = dev_name(dev);
+ gc->owner = THIS_MODULE;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->direction_input = lp_gpio_direction_input;
+ gc->direction_output = lp_gpio_direction_output;
+ gc->get = lp_gpio_get;
+ gc->set = lp_gpio_set;
+ gc->get_direction = lp_gpio_get_direction;
+ gc->base = -1;
+ gc->ngpio = LP_NUM_GPIO;
+ gc->can_sleep = false;
+ gc->add_pin_ranges = lp_gpio_add_pin_ranges;
+ gc->parent = dev;
+
+ /* set up interrupts */
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_rc && irq_rc->start) {
+ struct gpio_irq_chip *girq;
+
+ girq = &gc->irq;
+ girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
+ girq->parent_handler = lp_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
+
+ ret = devm_gpiochip_add_data(dev, gc, lg);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_resume(struct device *dev)
+{
+ struct intel_pinctrl *lg = dev_get_drvdata(dev);
+ void __iomem *reg;
+ int i;
+
+ /* on some hardware suspend clears input sensing, re-enable it here */
+ for (i = 0; i < lg->chip.ngpio; i++) {
+ if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+ reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+ iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
+ }
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+ .runtime_suspend = lp_gpio_runtime_suspend,
+ .runtime_resume = lp_gpio_runtime_resume,
+ .resume = lp_gpio_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ { "INT33C7", (kernel_ulong_t)&lptlp_soc_data },
+ { "INT3437", (kernel_ulong_t)&lptlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static struct platform_driver lp_gpio_driver = {
+ .probe = lp_gpio_probe,
+ .remove = lp_gpio_remove,
+ .driver = {
+ .name = "lp_gpio",
+ .pm = &lp_gpio_pm_ops,
+ .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+ },
+};
+
+static int __init lp_gpio_init(void)
+{
+ return platform_driver_register(&lp_gpio_driver);
+}
+
+static void __exit lp_gpio_exit(void)
+{
+ platform_driver_unregister(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
+module_exit(lp_gpio_exit);
+
+MODULE_AUTHOR("Mathias Nyman (Intel)");
+MODULE_AUTHOR("Andy Shevchenko (Intel)");
+MODULE_DESCRIPTION("Intel Lynxpoint pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 44d7f50bbc82..330c8f077b73 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -49,6 +49,7 @@
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
@@ -588,6 +589,7 @@ static const struct intel_pinctrl_soc_data spth_soc_data = {
static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
{ "INT344B", (kernel_ulong_t)&sptlp_soc_data },
+ { "INT3451", (kernel_ulong_t)&spth_soc_data },
{ "INT345D", (kernel_ulong_t)&spth_soc_data },
{ }
};
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 58572b15b3ce..08a86f6fdea6 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -2,7 +2,7 @@
/*
* Intel Tiger Lake PCH pinctrl/GPIO driver
*
- * Copyright (C) 2019, Intel Corporation
+ * Copyright (C) 2019 - 2020, Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
@@ -21,15 +21,19 @@
#define TGL_GPI_IS 0x100
#define TGL_GPI_IE 0x120
-#define TGL_GPP(r, s, e) \
+#define TGL_NO_GPIO -1
+
+#define TGL_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
.base = (s), \
.size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
}
-#define TGL_COMMUNITY(s, e, g) \
+#define TGL_COMMUNITY(b, s, e, g) \
{ \
+ .barno = (b), \
.padown_offset = TGL_PAD_OWN, \
.padcfglock_offset = TGL_PADCFGLOCK, \
.hostown_offset = TGL_HOSTSW_OWN, \
@@ -42,7 +46,7 @@
}
/* Tiger Lake-LP */
-static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+static const struct pinctrl_pin_desc tgllp_pins[] = {
/* GPP_B */
PINCTRL_PIN(0, "CORE_VID_0"),
PINCTRL_PIN(1, "CORE_VID_1"),
@@ -113,324 +117,273 @@ static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
PINCTRL_PIN(64, "GPPC_A_22"),
PINCTRL_PIN(65, "I2S1_SCLK"),
PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
-};
-
-static const struct intel_padgroup tgllp_community0_gpps[] = {
- TGL_GPP(0, 0, 25), /* GPP_B */
- TGL_GPP(1, 26, 41), /* GPP_T */
- TGL_GPP(2, 42, 66), /* GPP_A */
-};
-
-static const struct intel_community tgllp_community0[] = {
- TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
- .uid = "0",
- .pins = tgllp_community0_pins,
- .npins = ARRAY_SIZE(tgllp_community0_pins),
- .communities = tgllp_community0,
- .ncommunities = ARRAY_SIZE(tgllp_community0),
-};
-
-static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
/* GPP_S */
- PINCTRL_PIN(0, "SNDW0_CLK"),
- PINCTRL_PIN(1, "SNDW0_DATA"),
- PINCTRL_PIN(2, "SNDW1_CLK"),
- PINCTRL_PIN(3, "SNDW1_DATA"),
- PINCTRL_PIN(4, "SNDW2_CLK"),
- PINCTRL_PIN(5, "SNDW2_DATA"),
- PINCTRL_PIN(6, "SNDW3_CLK"),
- PINCTRL_PIN(7, "SNDW3_DATA"),
+ PINCTRL_PIN(67, "SNDW0_CLK"),
+ PINCTRL_PIN(68, "SNDW0_DATA"),
+ PINCTRL_PIN(69, "SNDW1_CLK"),
+ PINCTRL_PIN(70, "SNDW1_DATA"),
+ PINCTRL_PIN(71, "SNDW2_CLK"),
+ PINCTRL_PIN(72, "SNDW2_DATA"),
+ PINCTRL_PIN(73, "SNDW3_CLK"),
+ PINCTRL_PIN(74, "SNDW3_DATA"),
/* GPP_H */
- PINCTRL_PIN(8, "GPPC_H_0"),
- PINCTRL_PIN(9, "GPPC_H_1"),
- PINCTRL_PIN(10, "GPPC_H_2"),
- PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
- PINCTRL_PIN(12, "I2C2_SDA"),
- PINCTRL_PIN(13, "I2C2_SCL"),
- PINCTRL_PIN(14, "I2C3_SDA"),
- PINCTRL_PIN(15, "I2C3_SCL"),
- PINCTRL_PIN(16, "I2C4_SDA"),
- PINCTRL_PIN(17, "I2C4_SCL"),
- PINCTRL_PIN(18, "SRCCLKREQB_4"),
- PINCTRL_PIN(19, "SRCCLKREQB_5"),
- PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
- PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
- PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
- PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
- PINCTRL_PIN(24, "DDPB_CTRLCLK"),
- PINCTRL_PIN(25, "DDPB_CTRLDATA"),
- PINCTRL_PIN(26, "CPU_C10_GATEB"),
- PINCTRL_PIN(27, "TIME_SYNC_0"),
- PINCTRL_PIN(28, "IMGCLKOUT_1"),
- PINCTRL_PIN(29, "IMGCLKOUT_2"),
- PINCTRL_PIN(30, "IMGCLKOUT_3"),
- PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ PINCTRL_PIN(75, "GPPC_H_0"),
+ PINCTRL_PIN(76, "GPPC_H_1"),
+ PINCTRL_PIN(77, "GPPC_H_2"),
+ PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(79, "I2C2_SDA"),
+ PINCTRL_PIN(80, "I2C2_SCL"),
+ PINCTRL_PIN(81, "I2C3_SDA"),
+ PINCTRL_PIN(82, "I2C3_SCL"),
+ PINCTRL_PIN(83, "I2C4_SDA"),
+ PINCTRL_PIN(84, "I2C4_SCL"),
+ PINCTRL_PIN(85, "SRCCLKREQB_4"),
+ PINCTRL_PIN(86, "SRCCLKREQB_5"),
+ PINCTRL_PIN(87, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(88, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(89, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(90, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(91, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(92, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(93, "CPU_C10_GATEB"),
+ PINCTRL_PIN(94, "TIME_SYNC_0"),
+ PINCTRL_PIN(95, "IMGCLKOUT_1"),
+ PINCTRL_PIN(96, "IMGCLKOUT_2"),
+ PINCTRL_PIN(97, "IMGCLKOUT_3"),
+ PINCTRL_PIN(98, "IMGCLKOUT_4"),
/* GPP_D */
- PINCTRL_PIN(32, "ISH_GP_0"),
- PINCTRL_PIN(33, "ISH_GP_1"),
- PINCTRL_PIN(34, "ISH_GP_2"),
- PINCTRL_PIN(35, "ISH_GP_3"),
- PINCTRL_PIN(36, "IMGCLKOUT_0"),
- PINCTRL_PIN(37, "SRCCLKREQB_0"),
- PINCTRL_PIN(38, "SRCCLKREQB_1"),
- PINCTRL_PIN(39, "SRCCLKREQB_2"),
- PINCTRL_PIN(40, "SRCCLKREQB_3"),
- PINCTRL_PIN(41, "ISH_SPI_CSB"),
- PINCTRL_PIN(42, "ISH_SPI_CLK"),
- PINCTRL_PIN(43, "ISH_SPI_MISO"),
- PINCTRL_PIN(44, "ISH_SPI_MOSI"),
- PINCTRL_PIN(45, "ISH_UART0_RXD"),
- PINCTRL_PIN(46, "ISH_UART0_TXD"),
- PINCTRL_PIN(47, "ISH_UART0_RTSB"),
- PINCTRL_PIN(48, "ISH_UART0_CTSB"),
- PINCTRL_PIN(49, "ISH_GP_4"),
- PINCTRL_PIN(50, "ISH_GP_5"),
- PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
- PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(99, "ISH_GP_0"),
+ PINCTRL_PIN(100, "ISH_GP_1"),
+ PINCTRL_PIN(101, "ISH_GP_2"),
+ PINCTRL_PIN(102, "ISH_GP_3"),
+ PINCTRL_PIN(103, "IMGCLKOUT_0"),
+ PINCTRL_PIN(104, "SRCCLKREQB_0"),
+ PINCTRL_PIN(105, "SRCCLKREQB_1"),
+ PINCTRL_PIN(106, "SRCCLKREQB_2"),
+ PINCTRL_PIN(107, "SRCCLKREQB_3"),
+ PINCTRL_PIN(108, "ISH_SPI_CSB"),
+ PINCTRL_PIN(109, "ISH_SPI_CLK"),
+ PINCTRL_PIN(110, "ISH_SPI_MISO"),
+ PINCTRL_PIN(111, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(112, "ISH_UART0_RXD"),
+ PINCTRL_PIN(113, "ISH_UART0_TXD"),
+ PINCTRL_PIN(114, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(115, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(116, "ISH_GP_4"),
+ PINCTRL_PIN(117, "ISH_GP_5"),
+ PINCTRL_PIN(118, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"),
/* GPP_U */
- PINCTRL_PIN(53, "UART3_RXD"),
- PINCTRL_PIN(54, "UART3_TXD"),
- PINCTRL_PIN(55, "UART3_RTSB"),
- PINCTRL_PIN(56, "UART3_CTSB"),
- PINCTRL_PIN(57, "GSPI3_CS0B"),
- PINCTRL_PIN(58, "GSPI3_CLK"),
- PINCTRL_PIN(59, "GSPI3_MISO"),
- PINCTRL_PIN(60, "GSPI3_MOSI"),
- PINCTRL_PIN(61, "GSPI4_CS0B"),
- PINCTRL_PIN(62, "GSPI4_CLK"),
- PINCTRL_PIN(63, "GSPI4_MISO"),
- PINCTRL_PIN(64, "GSPI4_MOSI"),
- PINCTRL_PIN(65, "GSPI5_CS0B"),
- PINCTRL_PIN(66, "GSPI5_CLK"),
- PINCTRL_PIN(67, "GSPI5_MISO"),
- PINCTRL_PIN(68, "GSPI5_MOSI"),
- PINCTRL_PIN(69, "GSPI6_CS0B"),
- PINCTRL_PIN(70, "GSPI6_CLK"),
- PINCTRL_PIN(71, "GSPI6_MISO"),
- PINCTRL_PIN(72, "GSPI6_MOSI"),
- PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
- PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
- PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
- PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ PINCTRL_PIN(120, "UART3_RXD"),
+ PINCTRL_PIN(121, "UART3_TXD"),
+ PINCTRL_PIN(122, "UART3_RTSB"),
+ PINCTRL_PIN(123, "UART3_CTSB"),
+ PINCTRL_PIN(124, "GSPI3_CS0B"),
+ PINCTRL_PIN(125, "GSPI3_CLK"),
+ PINCTRL_PIN(126, "GSPI3_MISO"),
+ PINCTRL_PIN(127, "GSPI3_MOSI"),
+ PINCTRL_PIN(128, "GSPI4_CS0B"),
+ PINCTRL_PIN(129, "GSPI4_CLK"),
+ PINCTRL_PIN(130, "GSPI4_MISO"),
+ PINCTRL_PIN(131, "GSPI4_MOSI"),
+ PINCTRL_PIN(132, "GSPI5_CS0B"),
+ PINCTRL_PIN(133, "GSPI5_CLK"),
+ PINCTRL_PIN(134, "GSPI5_MISO"),
+ PINCTRL_PIN(135, "GSPI5_MOSI"),
+ PINCTRL_PIN(136, "GSPI6_CS0B"),
+ PINCTRL_PIN(137, "GSPI6_CLK"),
+ PINCTRL_PIN(138, "GSPI6_MISO"),
+ PINCTRL_PIN(139, "GSPI6_MOSI"),
+ PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"),
/* vGPIO */
- PINCTRL_PIN(77, "CNV_BTEN"),
- PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
- PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
- PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
- PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
- PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
- PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
- PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
- PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
- PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
- PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
- PINCTRL_PIN(88, "vUART0_TXD"),
- PINCTRL_PIN(89, "vUART0_RXD"),
- PINCTRL_PIN(90, "vUART0_CTS_B"),
- PINCTRL_PIN(91, "vUART0_RTS_B"),
- PINCTRL_PIN(92, "vISH_UART0_TXD"),
- PINCTRL_PIN(93, "vISH_UART0_RXD"),
- PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
- PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
- PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
- PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
- PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
- PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
- PINCTRL_PIN(100, "vI2S2_SCLK"),
- PINCTRL_PIN(101, "vI2S2_SFRM"),
- PINCTRL_PIN(102, "vI2S2_TXD"),
- PINCTRL_PIN(103, "vI2S2_RXD"),
-};
-
-static const struct intel_padgroup tgllp_community1_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_S */
- TGL_GPP(1, 8, 31), /* GPP_H */
- TGL_GPP(2, 32, 52), /* GPP_D */
- TGL_GPP(3, 53, 76), /* GPP_U */
- TGL_GPP(4, 77, 103), /* vGPIO */
-};
-
-static const struct intel_community tgllp_community1[] = {
- TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
- .uid = "1",
- .pins = tgllp_community1_pins,
- .npins = ARRAY_SIZE(tgllp_community1_pins),
- .communities = tgllp_community1,
- .ncommunities = ARRAY_SIZE(tgllp_community1),
-};
-
-static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ PINCTRL_PIN(144, "CNV_BTEN"),
+ PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(155, "vUART0_TXD"),
+ PINCTRL_PIN(156, "vUART0_RXD"),
+ PINCTRL_PIN(157, "vUART0_CTS_B"),
+ PINCTRL_PIN(158, "vUART0_RTS_B"),
+ PINCTRL_PIN(159, "vISH_UART0_TXD"),
+ PINCTRL_PIN(160, "vISH_UART0_RXD"),
+ PINCTRL_PIN(161, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(162, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(167, "vI2S2_SCLK"),
+ PINCTRL_PIN(168, "vI2S2_SFRM"),
+ PINCTRL_PIN(169, "vI2S2_TXD"),
+ PINCTRL_PIN(170, "vI2S2_RXD"),
/* GPP_C */
- PINCTRL_PIN(0, "SMBCLK"),
- PINCTRL_PIN(1, "SMBDATA"),
- PINCTRL_PIN(2, "SMBALERTB"),
- PINCTRL_PIN(3, "SML0CLK"),
- PINCTRL_PIN(4, "SML0DATA"),
- PINCTRL_PIN(5, "SML0ALERTB"),
- PINCTRL_PIN(6, "SML1CLK"),
- PINCTRL_PIN(7, "SML1DATA"),
- PINCTRL_PIN(8, "UART0_RXD"),
- PINCTRL_PIN(9, "UART0_TXD"),
- PINCTRL_PIN(10, "UART0_RTSB"),
- PINCTRL_PIN(11, "UART0_CTSB"),
- PINCTRL_PIN(12, "UART1_RXD"),
- PINCTRL_PIN(13, "UART1_TXD"),
- PINCTRL_PIN(14, "UART1_RTSB"),
- PINCTRL_PIN(15, "UART1_CTSB"),
- PINCTRL_PIN(16, "I2C0_SDA"),
- PINCTRL_PIN(17, "I2C0_SCL"),
- PINCTRL_PIN(18, "I2C1_SDA"),
- PINCTRL_PIN(19, "I2C1_SCL"),
- PINCTRL_PIN(20, "UART2_RXD"),
- PINCTRL_PIN(21, "UART2_TXD"),
- PINCTRL_PIN(22, "UART2_RTSB"),
- PINCTRL_PIN(23, "UART2_CTSB"),
+ PINCTRL_PIN(171, "SMBCLK"),
+ PINCTRL_PIN(172, "SMBDATA"),
+ PINCTRL_PIN(173, "SMBALERTB"),
+ PINCTRL_PIN(174, "SML0CLK"),
+ PINCTRL_PIN(175, "SML0DATA"),
+ PINCTRL_PIN(176, "SML0ALERTB"),
+ PINCTRL_PIN(177, "SML1CLK"),
+ PINCTRL_PIN(178, "SML1DATA"),
+ PINCTRL_PIN(179, "UART0_RXD"),
+ PINCTRL_PIN(180, "UART0_TXD"),
+ PINCTRL_PIN(181, "UART0_RTSB"),
+ PINCTRL_PIN(182, "UART0_CTSB"),
+ PINCTRL_PIN(183, "UART1_RXD"),
+ PINCTRL_PIN(184, "UART1_TXD"),
+ PINCTRL_PIN(185, "UART1_RTSB"),
+ PINCTRL_PIN(186, "UART1_CTSB"),
+ PINCTRL_PIN(187, "I2C0_SDA"),
+ PINCTRL_PIN(188, "I2C0_SCL"),
+ PINCTRL_PIN(189, "I2C1_SDA"),
+ PINCTRL_PIN(190, "I2C1_SCL"),
+ PINCTRL_PIN(191, "UART2_RXD"),
+ PINCTRL_PIN(192, "UART2_TXD"),
+ PINCTRL_PIN(193, "UART2_RTSB"),
+ PINCTRL_PIN(194, "UART2_CTSB"),
/* GPP_F */
- PINCTRL_PIN(24, "CNV_BRI_DT"),
- PINCTRL_PIN(25, "CNV_BRI_RSP"),
- PINCTRL_PIN(26, "CNV_RGI_DT"),
- PINCTRL_PIN(27, "CNV_RGI_RSP"),
- PINCTRL_PIN(28, "CNV_RF_RESET_B"),
- PINCTRL_PIN(29, "GPPC_F_5"),
- PINCTRL_PIN(30, "CNV_PA_BLANKING"),
- PINCTRL_PIN(31, "GPPC_F_7"),
- PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
- PINCTRL_PIN(33, "BOOTMPC"),
- PINCTRL_PIN(34, "GPPC_F_10"),
- PINCTRL_PIN(35, "GPPC_F_11"),
- PINCTRL_PIN(36, "GSXDOUT"),
- PINCTRL_PIN(37, "GSXSLOAD"),
- PINCTRL_PIN(38, "GSXDIN"),
- PINCTRL_PIN(39, "GSXSRESETB"),
- PINCTRL_PIN(40, "GSXCLK"),
- PINCTRL_PIN(41, "GMII_MDC"),
- PINCTRL_PIN(42, "GMII_MDIO"),
- PINCTRL_PIN(43, "SRCCLKREQB_6"),
- PINCTRL_PIN(44, "EXT_PWR_GATEB"),
- PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
- PINCTRL_PIN(46, "VNN_CTRL"),
- PINCTRL_PIN(47, "V1P05_CTRL"),
- PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ PINCTRL_PIN(195, "CNV_BRI_DT"),
+ PINCTRL_PIN(196, "CNV_BRI_RSP"),
+ PINCTRL_PIN(197, "CNV_RGI_DT"),
+ PINCTRL_PIN(198, "CNV_RGI_RSP"),
+ PINCTRL_PIN(199, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(200, "GPPC_F_5"),
+ PINCTRL_PIN(201, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(202, "GPPC_F_7"),
+ PINCTRL_PIN(203, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(204, "BOOTMPC"),
+ PINCTRL_PIN(205, "GPPC_F_10"),
+ PINCTRL_PIN(206, "GPPC_F_11"),
+ PINCTRL_PIN(207, "GSXDOUT"),
+ PINCTRL_PIN(208, "GSXSLOAD"),
+ PINCTRL_PIN(209, "GSXDIN"),
+ PINCTRL_PIN(210, "GSXSRESETB"),
+ PINCTRL_PIN(211, "GSXCLK"),
+ PINCTRL_PIN(212, "GMII_MDC"),
+ PINCTRL_PIN(213, "GMII_MDIO"),
+ PINCTRL_PIN(214, "SRCCLKREQB_6"),
+ PINCTRL_PIN(215, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(216, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(217, "VNN_CTRL"),
+ PINCTRL_PIN(218, "V1P05_CTRL"),
+ PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"),
/* HVCMOS */
- PINCTRL_PIN(49, "L_BKLTEN"),
- PINCTRL_PIN(50, "L_BKLTCTL"),
- PINCTRL_PIN(51, "L_VDDEN"),
- PINCTRL_PIN(52, "SYS_PWROK"),
- PINCTRL_PIN(53, "SYS_RESETB"),
- PINCTRL_PIN(54, "MLK_RSTB"),
+ PINCTRL_PIN(220, "L_BKLTEN"),
+ PINCTRL_PIN(221, "L_BKLTCTL"),
+ PINCTRL_PIN(222, "L_VDDEN"),
+ PINCTRL_PIN(223, "SYS_PWROK"),
+ PINCTRL_PIN(224, "SYS_RESETB"),
+ PINCTRL_PIN(225, "MLK_RSTB"),
/* GPP_E */
- PINCTRL_PIN(55, "SATAXPCIE_0"),
- PINCTRL_PIN(56, "SPI1_IO_2"),
- PINCTRL_PIN(57, "SPI1_IO_3"),
- PINCTRL_PIN(58, "CPU_GP_0"),
- PINCTRL_PIN(59, "SATA_DEVSLP_0"),
- PINCTRL_PIN(60, "SATA_DEVSLP_1"),
- PINCTRL_PIN(61, "GPPC_E_6"),
- PINCTRL_PIN(62, "CPU_GP_1"),
- PINCTRL_PIN(63, "SPI1_CS1B"),
- PINCTRL_PIN(64, "USB2_OCB_0"),
- PINCTRL_PIN(65, "SPI1_CSB"),
- PINCTRL_PIN(66, "SPI1_CLK"),
- PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
- PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
- PINCTRL_PIN(69, "DDSP_HPD_A"),
- PINCTRL_PIN(70, "ISH_GP_6"),
- PINCTRL_PIN(71, "ISH_GP_7"),
- PINCTRL_PIN(72, "GPPC_E_17"),
- PINCTRL_PIN(73, "DDP1_CTRLCLK"),
- PINCTRL_PIN(74, "DDP1_CTRLDATA"),
- PINCTRL_PIN(75, "DDP2_CTRLCLK"),
- PINCTRL_PIN(76, "DDP2_CTRLDATA"),
- PINCTRL_PIN(77, "DDPA_CTRLCLK"),
- PINCTRL_PIN(78, "DDPA_CTRLDATA"),
- PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(226, "SATAXPCIE_0"),
+ PINCTRL_PIN(227, "SPI1_IO_2"),
+ PINCTRL_PIN(228, "SPI1_IO_3"),
+ PINCTRL_PIN(229, "CPU_GP_0"),
+ PINCTRL_PIN(230, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(231, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(232, "GPPC_E_6"),
+ PINCTRL_PIN(233, "CPU_GP_1"),
+ PINCTRL_PIN(234, "SPI1_CS1B"),
+ PINCTRL_PIN(235, "USB2_OCB_0"),
+ PINCTRL_PIN(236, "SPI1_CSB"),
+ PINCTRL_PIN(237, "SPI1_CLK"),
+ PINCTRL_PIN(238, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(239, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(240, "DDSP_HPD_A"),
+ PINCTRL_PIN(241, "ISH_GP_6"),
+ PINCTRL_PIN(242, "ISH_GP_7"),
+ PINCTRL_PIN(243, "GPPC_E_17"),
+ PINCTRL_PIN(244, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(245, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(246, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(247, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(248, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(249, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"),
/* JTAG */
- PINCTRL_PIN(80, "JTAG_TDO"),
- PINCTRL_PIN(81, "JTAGX"),
- PINCTRL_PIN(82, "PRDYB"),
- PINCTRL_PIN(83, "PREQB"),
- PINCTRL_PIN(84, "CPU_TRSTB"),
- PINCTRL_PIN(85, "JTAG_TDI"),
- PINCTRL_PIN(86, "JTAG_TMS"),
- PINCTRL_PIN(87, "JTAG_TCK"),
- PINCTRL_PIN(88, "DBG_PMODE"),
-};
-
-static const struct intel_padgroup tgllp_community4_gpps[] = {
- TGL_GPP(0, 0, 23), /* GPP_C */
- TGL_GPP(1, 24, 48), /* GPP_F */
- TGL_GPP(2, 49, 54), /* HVCMOS */
- TGL_GPP(3, 55, 79), /* GPP_E */
- TGL_GPP(4, 80, 88), /* JTAG */
+ PINCTRL_PIN(251, "JTAG_TDO"),
+ PINCTRL_PIN(252, "JTAGX"),
+ PINCTRL_PIN(253, "PRDYB"),
+ PINCTRL_PIN(254, "PREQB"),
+ PINCTRL_PIN(255, "CPU_TRSTB"),
+ PINCTRL_PIN(256, "JTAG_TDI"),
+ PINCTRL_PIN(257, "JTAG_TMS"),
+ PINCTRL_PIN(258, "JTAG_TCK"),
+ PINCTRL_PIN(259, "DBG_PMODE"),
+ /* GPP_R */
+ PINCTRL_PIN(260, "HDA_BCLK"),
+ PINCTRL_PIN(261, "HDA_SYNC"),
+ PINCTRL_PIN(262, "HDA_SDO"),
+ PINCTRL_PIN(263, "HDA_SDI_0"),
+ PINCTRL_PIN(264, "HDA_RSTB"),
+ PINCTRL_PIN(265, "HDA_SDI_1"),
+ PINCTRL_PIN(266, "GPP_R_6"),
+ PINCTRL_PIN(267, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(268, "SPI0_IO_2"),
+ PINCTRL_PIN(269, "SPI0_IO_3"),
+ PINCTRL_PIN(270, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(271, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(272, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(275, "SPI0_CLK"),
+ PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"),
};
-static const struct intel_community tgllp_community4[] = {
- TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25, 0), /* GPP_B */
+ TGL_GPP(1, 26, 41, 32), /* GPP_T */
+ TGL_GPP(2, 42, 66, 64), /* GPP_A */
};
-static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
- .uid = "4",
- .pins = tgllp_community4_pins,
- .npins = ARRAY_SIZE(tgllp_community4_pins),
- .communities = tgllp_community4,
- .ncommunities = ARRAY_SIZE(tgllp_community4),
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 67, 74, 96), /* GPP_S */
+ TGL_GPP(1, 75, 98, 128), /* GPP_H */
+ TGL_GPP(2, 99, 119, 160), /* GPP_D */
+ TGL_GPP(3, 120, 143, 192), /* GPP_U */
+ TGL_GPP(4, 144, 170, 224), /* vGPIO */
};
-static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
- /* GPP_R */
- PINCTRL_PIN(0, "HDA_BCLK"),
- PINCTRL_PIN(1, "HDA_SYNC"),
- PINCTRL_PIN(2, "HDA_SDO"),
- PINCTRL_PIN(3, "HDA_SDI_0"),
- PINCTRL_PIN(4, "HDA_RSTB"),
- PINCTRL_PIN(5, "HDA_SDI_1"),
- PINCTRL_PIN(6, "GPP_R_6"),
- PINCTRL_PIN(7, "GPP_R_7"),
- /* SPI */
- PINCTRL_PIN(8, "SPI0_IO_2"),
- PINCTRL_PIN(9, "SPI0_IO_3"),
- PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
- PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
- PINCTRL_PIN(12, "SPI0_TPM_CSB"),
- PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
- PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
- PINCTRL_PIN(15, "SPI0_CLK"),
- PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 171, 194, 256), /* GPP_C */
+ TGL_GPP(1, 195, 219, 288), /* GPP_F */
+ TGL_GPP(2, 220, 225, TGL_NO_GPIO), /* HVCMOS */
+ TGL_GPP(3, 226, 250, 320), /* GPP_E */
+ TGL_GPP(4, 251, 259, TGL_NO_GPIO), /* JTAG */
};
static const struct intel_padgroup tgllp_community5_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_R */
- TGL_GPP(1, 8, 16), /* SPI */
-};
-
-static const struct intel_community tgllp_community5[] = {
- TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+ TGL_GPP(0, 260, 267, 352), /* GPP_R */
+ TGL_GPP(1, 268, 276, TGL_NO_GPIO), /* SPI */
};
-static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
- .uid = "5",
- .pins = tgllp_community5_pins,
- .npins = ARRAY_SIZE(tgllp_community5_pins),
- .communities = tgllp_community5,
- .ncommunities = ARRAY_SIZE(tgllp_community5),
+static const struct intel_community tgllp_communities[] = {
+ TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+ TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+ TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+ TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
};
-static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
- &tgllp_community0_soc_data,
- &tgllp_community1_soc_data,
- &tgllp_community4_soc_data,
- &tgllp_community5_soc_data,
- NULL
+static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+ .pins = tgllp_pins,
+ .npins = ARRAY_SIZE(tgllp_pins),
+ .communities = tgllp_communities,
+ .ncommunities = ARRAY_SIZE(tgllp_communities),
};
static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
- { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
@@ -438,7 +391,7 @@ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
static struct platform_driver tgl_pinctrl_driver = {
- .probe = intel_pinctrl_probe_by_uid,
+ .probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "tigerlake-pinctrl",
.acpi_match_table = tgl_pinctrl_acpi_match,
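Note on the Tiger Lake hunks above: four per-community soc_data objects, previously matched by ACPI _UID (hence intel_pinctrl_probe_by_uid), collapse into one flat table of 277 pins matched purely by the INT34C5 _HID. TGL_GPP also grows a fourth argument carrying the GPIO base of each pad group, with TGL_NO_GPIO marking groups such as HVCMOS, JTAG and SPI that are not exposed as GPIOs. A minimal sketch of what the four-argument macro plausibly expands to — the real definition lives earlier in pinctrl-tigerlake.c, and the field names follow struct intel_padgroup:

	#define TGL_NO_GPIO	(-1)

	#define TGL_GPP(r, s, e, g)			\
		{					\
			.reg_num = (r),			\
			.base = (s),			\
			.size = ((e) - (s) + 1),	\
			.gpio_base = (g),		\
		}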
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
index ba2356a8ab89..845c408b5fdb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
* Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 8ff88bf2e849..aa79d7ecee00 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2017 Baylibre SAS.
* Author: Jerome Brunet <jbrunet@baylibre.com>
@@ -5,7 +6,6 @@
* Copyright (c) 2017 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ or MIT)
*/
struct meson_pmx_bank {
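Both header SPDX hunks above (mt2712.h and meson-axg-pmx.h) apply the same documented convention: the identifier must be the first line of the file, and while .c files carry it as a C++-style comment, headers use the C comment form because they may be included from contexts where // comments are not accepted. Side by side:

	// SPDX-License-Identifier: GPL-2.0	   <- first line of a .c file
	/* SPDX-License-Identifier: GPL-2.0 */	   <- first line of a .h file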
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 2d5339edd0b7..6cd4b3ec1b40 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -233,6 +233,8 @@ static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int eth_rxd3_h_pins[] = { GPIOH_5 };
+static const unsigned int eth_rxd2_h_pins[] = { GPIOH_6 };
static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
@@ -535,6 +537,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(spi_miso_1, 9, 12),
GROUP(spi_mosi_1, 9, 11),
GROUP(spi_sclk_1, 9, 10),
+ GROUP(eth_rxd3_h, 6, 15),
+ GROUP(eth_rxd2_h, 6, 14),
GROUP(eth_txd3, 6, 13),
GROUP(eth_txd2, 6, 12),
GROUP(eth_tx_clk, 6, 11),
@@ -746,7 +750,8 @@ static const char * const ethernet_groups[] = {
"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
- "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
+ "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2",
+ "eth_rxd3_h", "eth_rxd2_h"
};
static const char * const i2c_a_groups[] = {
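The two meson8b groups added above reuse pads GPIOH_5 and GPIOH_6, which already belong to eth_txd1_0 and eth_txd0_0: in the Meson pinmux model a pad may appear in several groups, and GROUP(name, reg, bit) records which bit of which mux register enables that particular group, so eth_rxd3_h is enable bit 15 of mux register 6. Illustrative sketch only — the driver's real accessor lives in the shared Meson pmx code — assuming the mux registers sit behind a regmap at 4-byte stride:

	static int meson_mux_enable_bit(struct regmap *map, unsigned int reg,
					unsigned int bit, bool enable)
	{
		/* set or clear one group-enable bit in mux register 'reg' */
		return regmap_update_bits(map, reg * 4, BIT(bit),
					  enable ? BIT(bit) : 0);
	}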
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa9dcde0f069..243fba254175 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -733,13 +732,20 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret) {
dev_err(dev, "no gpio-controller child node\n");
return ret;
}
- nr_irq_parent = of_irq_count(np);
+ nr_irq_parent = platform_irq_count(pdev);
+ if (nr_irq_parent < 0) {
+ if (nr_irq_parent != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(nr_irq_parent));
+ return nr_irq_parent;
+ }
+
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
@@ -776,7 +782,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
- int irq = irq_of_parse_and_map(np, i);
+ int irq = platform_get_irq(pdev, i);
if (irq < 0)
continue;
@@ -800,7 +806,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret)
return ret;
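The armada-37xx hunks swap of_irq_count() for platform_irq_count(). The point is error reporting: the platform helper can return a negative errno — including -EPROBE_DEFER while the interrupt parent has not probed yet — which of_irq_count() could never signal, so the driver now defers cleanly instead of coming up with missing parent interrupts; the per-index irq_of_parse_and_map() lookups become platform_get_irq() to match. The defer-aware shape, restated stand-alone:

	int nr_irq = platform_irq_count(pdev);

	if (nr_irq < 0) {
		/* -EPROBE_DEFER is passed up silently; the core retries */
		if (nr_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "couldn't count IRQs: %pe\n",
				ERR_PTR(nr_irq));
		return nr_irq;
	}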
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 22077cbe6880..a935065cdac4 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -331,7 +331,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
return 0;
}
-static struct irq_chip npcmgpio_irqchip = {
+static const struct irq_chip npcmgpio_irqchip = {
.name = "NPCM7XX-GPIO-IRQ",
.irq_ack = npcmgpio_irq_ack,
.irq_unmask = npcmgpio_irq_unmask,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index eab078244a4c..73aff6591de2 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_dev->base)
return -ENOMEM;
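By this point in the tree devm_ioremap_nocache() had become an alias of devm_ioremap() — ioremap() never hands out a cacheable mapping on the architectures this driver runs on — so the amd substitution above is behaviourally a no-op. Were one refactoring further, the lookup-plus-map pair could collapse into the newer helper, which (unlike devm_ioremap(), returning NULL) reports failure through ERR_PTR:

	gpio_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpio_dev->base))
		return PTR_ERR(gpio_dev->base);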
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index 986e04ac6b5b..d6c9f9dcff97 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -798,7 +798,7 @@ static int artpec6_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param;
unsigned int arg;
unsigned int regval;
- unsigned int *reg;
+ void __iomem *reg;
int i;
/* Check for valid pin */
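The artpec6 fix is a sparse address-space correction rather than a logic change: registers reached through ioremap() must be typed void __iomem * and touched only via the MMIO accessors, never dereferenced as plain integers. A minimal illustration — the offset name here is hypothetical:

	void __iomem *reg = base + EXAMPLE_PCONF_OFFSET; /* hypothetical offset */
	u32 regval = readl(reg);

	writel(regval | BIT(0), reg);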
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 369e04350e3d..96f04d121ebd 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,7 +3,7 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/compiler.h>
@@ -42,28 +42,36 @@
#define JZ4760_GPIO_FLAG 0x50
#define JZ4760_GPIO_PEN 0x70
-#define X1000_GPIO_PZ_BASE 0x700
-#define X1000_GPIO_PZ_GID2LD 0x7f0
+#define X1830_GPIO_PEL 0x110
+#define X1830_GPIO_PEH 0x120
#define REG_SET(x) ((x) + 0x4)
#define REG_CLEAR(x) ((x) + 0x8)
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
+
+#define GPIO_PULL_DIS 0
+#define GPIO_PULL_UP 1
+#define GPIO_PULL_DOWN 2
+
#define PINS_PER_GPIO_CHIP 32
enum jz_version {
ID_JZ4740,
ID_JZ4725B,
ID_JZ4760,
- ID_JZ4760B,
ID_JZ4770,
ID_JZ4780,
ID_X1000,
- ID_X1000E,
ID_X1500,
+ ID_X1830,
};
struct ingenic_chip_info {
unsigned int num_chips;
+ unsigned int reg_offset;
+ enum jz_version version;
const struct group_desc *groups;
unsigned int num_groups;
@@ -79,7 +87,6 @@ struct ingenic_pinctrl {
struct regmap *map;
struct pinctrl_dev *pctl;
struct pinctrl_pin_desc *pdesc;
- enum jz_version version;
const struct ingenic_chip_info *info;
};
@@ -216,6 +223,8 @@ static const struct function_desc jz4740_functions[] = {
static const struct ingenic_chip_info jz4740_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4740,
.groups = jz4740_groups,
.num_groups = ARRAY_SIZE(jz4740_groups),
.functions = jz4740_functions,
@@ -339,6 +348,8 @@ static const struct function_desc jz4725b_functions[] = {
static const struct ingenic_chip_info jz4725b_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4725B,
.groups = jz4725b_groups,
.num_groups = ARRAY_SIZE(jz4725b_groups),
.functions = jz4725b_functions,
@@ -592,16 +603,8 @@ static const struct function_desc jz4760_functions[] = {
static const struct ingenic_chip_info jz4760_chip_info = {
.num_chips = 6,
- .groups = jz4760_groups,
- .num_groups = ARRAY_SIZE(jz4760_groups),
- .functions = jz4760_functions,
- .num_functions = ARRAY_SIZE(jz4760_functions),
- .pull_ups = jz4760_pull_ups,
- .pull_downs = jz4760_pull_downs,
-};
-
-static const struct ingenic_chip_info jz4760b_chip_info = {
- .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4760,
.groups = jz4760_groups,
.num_groups = ARRAY_SIZE(jz4760_groups),
.functions = jz4760_functions,
@@ -880,6 +883,8 @@ static const struct function_desc jz4770_functions[] = {
static const struct ingenic_chip_info jz4770_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4770,
.groups = jz4770_groups,
.num_groups = ARRAY_SIZE(jz4770_groups),
.functions = jz4770_functions,
@@ -1013,6 +1018,8 @@ static const struct function_desc jz4780_functions[] = {
static const struct ingenic_chip_info jz4780_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4780,
.groups = jz4780_groups,
.num_groups = ARRAY_SIZE(jz4780_groups),
.functions = jz4780_functions,
@@ -1022,7 +1029,7 @@ static const struct ingenic_chip_info jz4780_chip_info = {
};
static const u32 x1000_pull_ups[4] = {
- 0xffffffff, 0x8dffffff, 0x7d3fffff, 0xffffffff,
+ 0xffffffff, 0xfdffffff, 0x0dffffff, 0x0000003f,
};
static const u32 x1000_pull_downs[4] = {
@@ -1033,28 +1040,45 @@ static int x1000_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1000_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1000_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1000_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1000_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1000_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1000_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1000_uart2_data_d_pins[] = { 0x65, 0x64, };
+static int x1000_sfc_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, 0x1a, 0x1b, };
+static int x1000_ssi_dt_a_22_pins[] = { 0x16, };
+static int x1000_ssi_dt_a_29_pins[] = { 0x1d, };
+static int x1000_ssi_dt_d_pins[] = { 0x62, };
+static int x1000_ssi_dr_a_23_pins[] = { 0x17, };
+static int x1000_ssi_dr_a_28_pins[] = { 0x1c, };
+static int x1000_ssi_dr_d_pins[] = { 0x63, };
+static int x1000_ssi_clk_a_24_pins[] = { 0x18, };
+static int x1000_ssi_clk_a_26_pins[] = { 0x1a, };
+static int x1000_ssi_clk_d_pins[] = { 0x60, };
+static int x1000_ssi_gpc_a_20_pins[] = { 0x14, };
+static int x1000_ssi_gpc_a_31_pins[] = { 0x1f, };
+static int x1000_ssi_ce0_a_25_pins[] = { 0x19, };
+static int x1000_ssi_ce0_a_27_pins[] = { 0x1b, };
+static int x1000_ssi_ce0_d_pins[] = { 0x61, };
+static int x1000_ssi_ce1_a_21_pins[] = { 0x15, };
+static int x1000_ssi_ce1_a_30_pins[] = { 0x1e, };
static int x1000_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
static int x1000_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1000_mmc0_8bit_pins[] = { 0x13, 0x12, 0x11, 0x10, };
static int x1000_mmc1_1bit_pins[] = { 0x40, 0x41, 0x42, };
static int x1000_mmc1_4bit_pins[] = { 0x43, 0x44, 0x45, };
-static int x1000_nemc_8bit_data_pins[] = {
+static int x1000_emc_8bit_data_pins[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
};
-static int x1000_nemc_16bit_data_pins[] = {
+static int x1000_emc_16bit_data_pins[] = {
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};
-static int x1000_nemc_addr_pins[] = {
+static int x1000_emc_addr_pins[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
};
-static int x1000_nemc_rd_we_pins[] = { 0x30, 0x31, };
-static int x1000_nemc_wait_pins[] = { 0x34, };
-static int x1000_nemc_cs1_pins[] = { 0x32, };
-static int x1000_nemc_cs2_pins[] = { 0x33, };
+static int x1000_emc_rd_we_pins[] = { 0x30, 0x31, };
+static int x1000_emc_wait_pins[] = { 0x34, };
+static int x1000_emc_cs1_pins[] = { 0x32, };
+static int x1000_emc_cs2_pins[] = { 0x33, };
static int x1000_i2c0_pins[] = { 0x38, 0x37, };
static int x1000_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1000_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1083,23 +1107,40 @@ static int x1000_uart0_data_funcs[] = { 0, 0, };
static int x1000_uart0_hwflow_funcs[] = { 0, 0, };
static int x1000_uart1_data_a_funcs[] = { 2, 2, };
static int x1000_uart1_data_d_funcs[] = { 1, 1, };
-static int x1000_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1000_uart1_hwflow_funcs[] = { 1, 1, };
static int x1000_uart2_data_a_funcs[] = { 2, 2, };
static int x1000_uart2_data_d_funcs[] = { 0, 0, };
+static int x1000_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1000_ssi_dt_a_22_funcs[] = { 2, };
+static int x1000_ssi_dt_a_29_funcs[] = { 2, };
+static int x1000_ssi_dt_d_funcs[] = { 0, };
+static int x1000_ssi_dr_a_23_funcs[] = { 2, };
+static int x1000_ssi_dr_a_28_funcs[] = { 2, };
+static int x1000_ssi_dr_d_funcs[] = { 0, };
+static int x1000_ssi_clk_a_24_funcs[] = { 2, };
+static int x1000_ssi_clk_a_26_funcs[] = { 2, };
+static int x1000_ssi_clk_d_funcs[] = { 0, };
+static int x1000_ssi_gpc_a_20_funcs[] = { 2, };
+static int x1000_ssi_gpc_a_31_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_25_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_27_funcs[] = { 2, };
+static int x1000_ssi_ce0_d_funcs[] = { 0, };
+static int x1000_ssi_ce1_a_21_funcs[] = { 2, };
+static int x1000_ssi_ce1_a_30_funcs[] = { 2, };
static int x1000_mmc0_1bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_4bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_8bit_funcs[] = { 1, 1, 1, 1, 1, };
static int x1000_mmc1_1bit_funcs[] = { 0, 0, 0, };
static int x1000_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1000_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_addr_funcs[] = {
+static int x1000_emc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_addr_funcs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-static int x1000_nemc_rd_we_funcs[] = { 0, 0, };
-static int x1000_nemc_wait_funcs[] = { 0, };
-static int x1000_nemc_cs1_funcs[] = { 0, };
-static int x1000_nemc_cs2_funcs[] = { 0, };
+static int x1000_emc_rd_we_funcs[] = { 0, 0, };
+static int x1000_emc_wait_funcs[] = { 0, };
+static int x1000_emc_cs1_funcs[] = { 0, };
+static int x1000_emc_cs2_funcs[] = { 0, };
static int x1000_i2c0_funcs[] = { 0, 0, };
static int x1000_i2c1_a_funcs[] = { 2, 2, };
static int x1000_i2c1_c_funcs[] = { 0, 0, };
@@ -1121,21 +1162,38 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1000_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22),
+ INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29),
+ INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d),
+ INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23),
+ INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28),
+ INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d),
+ INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24),
+ INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26),
+ INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d),
+ INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20),
+ INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31),
+ INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25),
+ INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27),
+ INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d),
+ INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21),
+ INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30),
INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit),
INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit),
INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit),
INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit),
INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit),
- INGENIC_PIN_GROUP("nemc-8bit-data", x1000_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-16bit-data", x1000_nemc_16bit_data),
- INGENIC_PIN_GROUP("nemc-addr", x1000_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", x1000_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-wait", x1000_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", x1000_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", x1000_nemc_cs2),
+ INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data),
+ INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data),
+ INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr),
+ INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we),
+ INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait),
+ INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1),
+ INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2),
INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
@@ -1154,21 +1212,30 @@ static const struct group_desc x1000_groups[] = {
static const char *x1000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1000_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1000_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
+static const char *x1000_sfc_groups[] = { "sfc", };
+static const char *x1000_ssi_groups[] = {
+ "ssi-dt-a-22", "ssi-dt-a-29", "ssi-dt-d",
+ "ssi-dr-a-23", "ssi-dr-a-28", "ssi-dr-d",
+ "ssi-clk-a-24", "ssi-clk-a-26", "ssi-clk-d",
+ "ssi-gpc-a-20", "ssi-gpc-a-31",
+ "ssi-ce0-a-25", "ssi-ce0-a-27", "ssi-ce0-d",
+ "ssi-ce1-a-21", "ssi-ce1-a-30",
+};
static const char *x1000_mmc0_groups[] = {
"mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
};
static const char *x1000_mmc1_groups[] = {
- "mmc1-1bit-e", "mmc1-4bit-e",
+ "mmc1-1bit", "mmc1-4bit",
};
-static const char *x1000_nemc_groups[] = {
- "nemc-8bit-data", "nemc-16bit-data",
- "nemc-addr", "nemc-rd-we", "nemc-wait",
+static const char *x1000_emc_groups[] = {
+ "emc-8bit-data", "emc-16bit-data",
+ "emc-addr", "emc-rd-we", "emc-wait",
};
-static const char *x1000_cs1_groups[] = { "nemc-cs1", };
-static const char *x1000_cs2_groups[] = { "nemc-cs2", };
+static const char *x1000_cs1_groups[] = { "emc-cs1", };
+static const char *x1000_cs2_groups[] = { "emc-cs2", };
static const char *x1000_i2c0_groups[] = { "i2c0-data", };
static const char *x1000_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1000_i2c2_groups[] = { "i2c2-data", };
@@ -1187,11 +1254,13 @@ static const struct function_desc x1000_functions[] = {
{ "uart0", x1000_uart0_groups, ARRAY_SIZE(x1000_uart0_groups), },
{ "uart1", x1000_uart1_groups, ARRAY_SIZE(x1000_uart1_groups), },
{ "uart2", x1000_uart2_groups, ARRAY_SIZE(x1000_uart2_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "ssi", x1000_ssi_groups, ARRAY_SIZE(x1000_ssi_groups), },
{ "mmc0", x1000_mmc0_groups, ARRAY_SIZE(x1000_mmc0_groups), },
{ "mmc1", x1000_mmc1_groups, ARRAY_SIZE(x1000_mmc1_groups), },
- { "nemc", x1000_nemc_groups, ARRAY_SIZE(x1000_nemc_groups), },
- { "nemc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
- { "nemc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
+ { "emc", x1000_emc_groups, ARRAY_SIZE(x1000_emc_groups), },
+ { "emc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
+ { "emc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
{ "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
@@ -1207,16 +1276,8 @@ static const struct function_desc x1000_functions[] = {
static const struct ingenic_chip_info x1000_chip_info = {
.num_chips = 4,
- .groups = x1000_groups,
- .num_groups = ARRAY_SIZE(x1000_groups),
- .functions = x1000_functions,
- .num_functions = ARRAY_SIZE(x1000_functions),
- .pull_ups = x1000_pull_ups,
- .pull_downs = x1000_pull_downs,
-};
-
-static const struct ingenic_chip_info x1000e_chip_info = {
- .num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1000,
.groups = x1000_groups,
.num_groups = ARRAY_SIZE(x1000_groups),
.functions = x1000_functions,
@@ -1229,11 +1290,11 @@ static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1500_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1500_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1500_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1500_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1500_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1500_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1500_uart2_data_d_pins[] = { 0x65, 0x64, };
-static int x1500_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
-static int x1500_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
+static int x1500_mmc_1bit_pins[] = { 0x18, 0x19, 0x17, };
+static int x1500_mmc_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1500_i2c0_pins[] = { 0x38, 0x37, };
static int x1500_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1500_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1252,11 +1313,11 @@ static int x1500_uart0_data_funcs[] = { 0, 0, };
static int x1500_uart0_hwflow_funcs[] = { 0, 0, };
static int x1500_uart1_data_a_funcs[] = { 2, 2, };
static int x1500_uart1_data_d_funcs[] = { 1, 1, };
-static int x1500_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1500_uart1_hwflow_funcs[] = { 1, 1, };
static int x1500_uart2_data_a_funcs[] = { 2, 2, };
static int x1500_uart2_data_d_funcs[] = { 0, 0, };
-static int x1500_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int x1500_mmc0_4bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_1bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_4bit_funcs[] = { 1, 1, 1, };
static int x1500_i2c0_funcs[] = { 0, 0, };
static int x1500_i2c1_a_funcs[] = { 2, 2, };
static int x1500_i2c1_c_funcs[] = { 0, 0, };
@@ -1273,11 +1334,12 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1500_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d),
- INGENIC_PIN_GROUP("mmc0-1bit", x1500_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", x1500_mmc0_4bit),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit),
+ INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit),
INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
@@ -1293,10 +1355,10 @@ static const struct group_desc x1500_groups[] = {
static const char *x1500_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1500_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1500_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
-static const char *x1500_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1500_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *x1500_i2c0_groups[] = { "i2c0-data", };
static const char *x1500_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1500_i2c2_groups[] = { "i2c2-data", };
@@ -1312,7 +1374,8 @@ static const struct function_desc x1500_functions[] = {
{ "uart0", x1500_uart0_groups, ARRAY_SIZE(x1500_uart0_groups), },
{ "uart1", x1500_uart1_groups, ARRAY_SIZE(x1500_uart1_groups), },
{ "uart2", x1500_uart2_groups, ARRAY_SIZE(x1500_uart2_groups), },
- { "mmc0", x1500_mmc0_groups, ARRAY_SIZE(x1500_mmc0_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "mmc", x1500_mmc_groups, ARRAY_SIZE(x1500_mmc_groups), },
{ "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
@@ -1327,6 +1390,8 @@ static const struct function_desc x1500_functions[] = {
static const struct ingenic_chip_info x1500_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1500,
.groups = x1500_groups,
.num_groups = ARRAY_SIZE(x1500_groups),
.functions = x1500_functions,
@@ -1335,6 +1400,222 @@ static const struct ingenic_chip_info x1500_chip_info = {
.pull_downs = x1000_pull_downs,
};
+static const u32 x1830_pull_ups[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static const u32 x1830_pull_downs[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static int x1830_uart0_data_pins[] = { 0x33, 0x36, };
+static int x1830_uart0_hwflow_pins[] = { 0x34, 0x35, };
+static int x1830_uart1_data_pins[] = { 0x38, 0x37, };
+static int x1830_sfc_pins[] = { 0x17, 0x18, 0x1a, 0x19, 0x1b, 0x1c, };
+static int x1830_ssi0_dt_pins[] = { 0x4c, };
+static int x1830_ssi0_dr_pins[] = { 0x4b, };
+static int x1830_ssi0_clk_pins[] = { 0x4f, };
+static int x1830_ssi0_gpc_pins[] = { 0x4d, };
+static int x1830_ssi0_ce0_pins[] = { 0x50, };
+static int x1830_ssi0_ce1_pins[] = { 0x4e, };
+static int x1830_ssi1_dt_c_pins[] = { 0x53, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
+static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
+static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
+static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
+static int x1830_mmc1_1bit_pins[] = { 0x42, 0x43, 0x44, };
+static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
+static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
+static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
+static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
+static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
+static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
+static int x1830_pwm_pwm1_c_pins[] = { 0x4c, };
+static int x1830_pwm_pwm2_c_8_pins[] = { 0x48, };
+static int x1830_pwm_pwm2_c_13_pins[] = { 0x4d, };
+static int x1830_pwm_pwm3_c_9_pins[] = { 0x49, };
+static int x1830_pwm_pwm3_c_14_pins[] = { 0x4e, };
+static int x1830_pwm_pwm4_c_15_pins[] = { 0x4f, };
+static int x1830_pwm_pwm4_c_25_pins[] = { 0x59, };
+static int x1830_pwm_pwm5_c_16_pins[] = { 0x50, };
+static int x1830_pwm_pwm5_c_26_pins[] = { 0x5a, };
+static int x1830_pwm_pwm6_c_17_pins[] = { 0x51, };
+static int x1830_pwm_pwm6_c_27_pins[] = { 0x5b, };
+static int x1830_pwm_pwm7_c_18_pins[] = { 0x52, };
+static int x1830_pwm_pwm7_c_28_pins[] = { 0x5c, };
+static int x1830_mac_pins[] = {
+ 0x29, 0x30, 0x2f, 0x28, 0x2e, 0x2d, 0x2a, 0x2b, 0x26, 0x27,
+};
+
+static int x1830_uart0_data_funcs[] = { 0, 0, };
+static int x1830_uart0_hwflow_funcs[] = { 0, 0, };
+static int x1830_uart1_data_funcs[] = { 0, 0, };
+static int x1830_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1830_ssi0_dt_funcs[] = { 0, };
+static int x1830_ssi0_dr_funcs[] = { 0, };
+static int x1830_ssi0_clk_funcs[] = { 0, };
+static int x1830_ssi0_gpc_funcs[] = { 0, };
+static int x1830_ssi0_ce0_funcs[] = { 0, };
+static int x1830_ssi0_ce1_funcs[] = { 0, };
+static int x1830_ssi1_dt_c_funcs[] = { 1, };
+static int x1830_ssi1_dr_c_funcs[] = { 1, };
+static int x1830_ssi1_clk_c_funcs[] = { 1, };
+static int x1830_ssi1_gpc_c_funcs[] = { 1, };
+static int x1830_ssi1_ce0_c_funcs[] = { 1, };
+static int x1830_ssi1_ce1_c_funcs[] = { 1, };
+static int x1830_ssi1_dt_d_funcs[] = { 2, };
+static int x1830_ssi1_dr_d_funcs[] = { 2, };
+static int x1830_ssi1_clk_d_funcs[] = { 2, };
+static int x1830_ssi1_gpc_d_funcs[] = { 2, };
+static int x1830_ssi1_ce0_d_funcs[] = { 2, };
+static int x1830_ssi1_ce1_d_funcs[] = { 2, };
+static int x1830_mmc0_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc0_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_i2c0_funcs[] = { 1, 1, };
+static int x1830_i2c1_funcs[] = { 0, 0, };
+static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_pwm_pwm0_b_funcs[] = { 0, };
+static int x1830_pwm_pwm0_c_funcs[] = { 1, };
+static int x1830_pwm_pwm1_b_funcs[] = { 0, };
+static int x1830_pwm_pwm1_c_funcs[] = { 1, };
+static int x1830_pwm_pwm2_c_8_funcs[] = { 0, };
+static int x1830_pwm_pwm2_c_13_funcs[] = { 1, };
+static int x1830_pwm_pwm3_c_9_funcs[] = { 0, };
+static int x1830_pwm_pwm3_c_14_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_15_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_25_funcs[] = { 0, };
+static int x1830_pwm_pwm5_c_16_funcs[] = { 1, };
+static int x1830_pwm_pwm5_c_26_funcs[] = { 0, };
+static int x1830_pwm_pwm6_c_17_funcs[] = { 1, };
+static int x1830_pwm_pwm6_c_27_funcs[] = { 0, };
+static int x1830_pwm_pwm7_c_18_funcs[] = { 1, };
+static int x1830_pwm_pwm7_c_28_funcs[] = { 0, };
+static int x1830_mac_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+
+static const struct group_desc x1830_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data),
+ INGENIC_PIN_GROUP("sfc", x1830_sfc),
+ INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt),
+ INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr),
+ INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk),
+ INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc),
+ INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0),
+ INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c),
+ INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c),
+ INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d),
+ INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit),
+ INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit),
+ INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
+ INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
+ INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
+ INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
+ INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
+ INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c),
+ INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8),
+ INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13),
+ INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9),
+ INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14),
+ INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15),
+ INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25),
+ INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16),
+ INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26),
+ INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17),
+ INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27),
+ INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18),
+ INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28),
+ INGENIC_PIN_GROUP("mac", x1830_mac),
+};
+
+static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x1830_uart1_groups[] = { "uart1-data", };
+static const char *x1830_sfc_groups[] = { "sfc", };
+static const char *x1830_ssi0_groups[] = {
+ "ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-gpc", "ssi0-ce0", "ssi0-ce1",
+};
+static const char *x1830_ssi1_groups[] = {
+ "ssi1-dt-c", "ssi1-dt-d",
+ "ssi1-dr-c", "ssi1-dr-d",
+ "ssi1-clk-c", "ssi1-clk-d",
+ "ssi1-gpc-c", "ssi1-gpc-d",
+ "ssi1-ce0-c", "ssi1-ce0-d",
+ "ssi1-ce1-c", "ssi1-ce1-d",
+};
+static const char *x1830_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x1830_i2c0_groups[] = { "i2c0-data", };
+static const char *x1830_i2c1_groups[] = { "i2c1-data", };
+static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
+static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
+static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
+static const char *x1830_pwm3_groups[] = { "pwm3-c-9", "pwm3-c-14", };
+static const char *x1830_pwm4_groups[] = { "pwm4-c-15", "pwm4-c-25", };
+static const char *x1830_pwm5_groups[] = { "pwm5-c-16", "pwm5-c-26", };
+static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
+static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
+static const char *x1830_mac_groups[] = { "mac", };
+
+static const struct function_desc x1830_functions[] = {
+ { "uart0", x1830_uart0_groups, ARRAY_SIZE(x1830_uart0_groups), },
+ { "uart1", x1830_uart1_groups, ARRAY_SIZE(x1830_uart1_groups), },
+ { "sfc", x1830_sfc_groups, ARRAY_SIZE(x1830_sfc_groups), },
+ { "ssi0", x1830_ssi0_groups, ARRAY_SIZE(x1830_ssi0_groups), },
+ { "ssi1", x1830_ssi1_groups, ARRAY_SIZE(x1830_ssi1_groups), },
+ { "mmc0", x1830_mmc0_groups, ARRAY_SIZE(x1830_mmc0_groups), },
+ { "mmc1", x1830_mmc1_groups, ARRAY_SIZE(x1830_mmc1_groups), },
+ { "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
+ { "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
+ { "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
+ { "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
+ { "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
+ { "pwm3", x1830_pwm3_groups, ARRAY_SIZE(x1830_pwm3_groups), },
+ { "pwm4", x1830_pwm4_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm5", x1830_pwm5_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm6", x1830_pwm6_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm7", x1830_pwm7_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
+};
+
+static const struct ingenic_chip_info x1830_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x1000,
+ .version = ID_X1830,
+ .groups = x1830_groups,
+ .num_groups = ARRAY_SIZE(x1830_groups),
+ .functions = x1830_functions,
+ .num_functions = ARRAY_SIZE(x1830_functions),
+ .pull_ups = x1830_pull_ups,
+ .pull_downs = x1830_pull_downs,
+};
+
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -1363,12 +1644,14 @@ static void ingenic_gpio_shadow_set_bit(struct ingenic_gpio_chip *jzgc,
else
reg = REG_CLEAR(reg);
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_BASE + reg, BIT(offset));
+ regmap_write(jzgc->jzpc->map, REG_PZ_BASE(
+ jzgc->jzpc->info->reg_offset) + reg, BIT(offset));
}
static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
{
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_GID2LD,
+ regmap_write(jzgc->jzpc->map, REG_PZ_GID2LD(
+ jzgc->jzpc->info->reg_offset),
jzgc->gc.base / PINS_PER_GPIO_CHIP);
}
@@ -1383,7 +1666,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
@@ -1393,58 +1676,42 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
u8 offset, unsigned int type)
{
u8 reg1, reg2;
-
- if (jzgc->jzpc->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
- } else {
- reg1 = JZ4740_GPIO_TRIG;
- reg2 = JZ4740_GPIO_DIR;
- }
+ bool val1, val2;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = val2 = true;
break;
case IRQ_TYPE_EDGE_FALLING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = false;
+ val2 = true;
break;
case IRQ_TYPE_LEVEL_HIGH:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = true;
+ val2 = false;
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = val2 = false;
break;
}
+
+ if (jzgc->jzpc->info->version >= ID_JZ4760) {
+ reg1 = JZ4760_GPIO_PAT1;
+ reg2 = JZ4760_GPIO_PAT0;
+ } else {
+ reg1 = JZ4740_GPIO_TRIG;
+ reg2 = JZ4740_GPIO_DIR;
+ }
+
+ if (jzgc->jzpc->info->version >= ID_X1000) {
+ ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+ ingenic_gpio_shadow_set_bit_load(jzgc);
+ } else {
+ ingenic_gpio_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, val2);
+ }
}
static void ingenic_gpio_irq_mask(struct irq_data *irqd)
@@ -1469,7 +1736,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1485,7 +1752,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
@@ -1510,7 +1777,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
}
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
@@ -1567,7 +1834,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1611,7 +1878,7 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, offt * 0x100 +
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
@@ -1620,14 +1887,15 @@ static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, X1000_GPIO_PZ_BASE +
+ regmap_write(jzpc->map, REG_PZ_BASE(jzpc->info->reg_offset) +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
unsigned int pin)
{
- regmap_write(jzpc->map, X1000_GPIO_PZ_GID2LD, pin / PINS_PER_GPIO_CHIP);
+ regmap_write(jzpc->map, REG_PZ_GID2LD(jzpc->info->reg_offset),
+ pin / PINS_PER_GPIO_CHIP);
}
static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
@@ -1637,7 +1905,7 @@ static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
unsigned int val;
- regmap_read(jzpc->map, offt * 0x100 + reg, &val);
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset + reg, &val);
return val & BIT(idx);
}
@@ -1648,7 +1916,7 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -1674,13 +1942,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
'A' + offt, idx, func);
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
@@ -1733,12 +2001,12 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
'A' + offt, idx, input ? "in" : "out");
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
@@ -1768,7 +2036,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -1798,18 +2066,37 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
}
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
- unsigned int pin, bool enabled)
+ unsigned int pin, unsigned int bias)
{
- if (jzpc->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !enabled);
- else
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
+ if (jzpc->info->version >= ID_X1830) {
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
+ unsigned int idxh = pin % half * 2;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+ if (idx < half) {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEL), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEL), bias << idxh);
+ } else {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEH), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEH), bias << idxh);
+ }
+
+ } else if (jzpc->info->version >= ID_JZ4760) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+ }
}
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
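On the X1830 the bias hardware changes shape: instead of one pull-enable bit per pin there is a 2-bit field per pin (the GPIO_PULL_DIS/UP/DOWN values defined earlier), packed sixteen pins per register, with PEL covering pins 0-15 of a port and PEH pins 16-31. The hunk clears the field through the REG_CLEAR mirror and then writes the new value through REG_SET, so no read-modify-write is needed. The field arithmetic, restated:

	unsigned int half = PINS_PER_GPIO_CHIP / 2;	/* 16 pins per register */
	unsigned int idx  = pin % PINS_PER_GPIO_CHIP;	/* 0..31 within the port */
	unsigned int idxh = (pin % half) * 2;		/* bit offset of the field */
	unsigned int reg  = idx < half ? X1830_GPIO_PEL : X1830_GPIO_PEH;

	/*
	 * Two writes through the mirrors, no RMW:
	 *   REG_CLEAR(reg) <- 3 << idxh, then REG_SET(reg) <- bias << idxh
	 */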
@@ -1843,7 +2130,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, false);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DIS);
break;
case PIN_CONFIG_BIAS_PULL_UP:
@@ -1851,7 +2138,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-up for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_UP);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -1859,7 +2146,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-down for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
break;
case PIN_CONFIG_OUTPUT:
@@ -1939,25 +2226,13 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
.reg_stride = 4,
};
-static const struct of_device_id ingenic_pinctrl_of_match[] = {
- { .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
- { .compatible = "ingenic,jz4725b-pinctrl", .data = (void *)ID_JZ4725B },
- { .compatible = "ingenic,jz4760-pinctrl", .data = (void *) ID_JZ4760 },
- { .compatible = "ingenic,jz4760b-pinctrl", .data = (void *) ID_JZ4760B },
- { .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
- { .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
- { .compatible = "ingenic,x1000-pinctrl", .data = (void *) ID_X1000 },
- { .compatible = "ingenic,x1000e-pinctrl", .data = (void *) ID_X1000E },
- { .compatible = "ingenic,x1500-pinctrl", .data = (void *) ID_X1500 },
- {},
-};
-
static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-gpio", },
{ .compatible = "ingenic,jz4760-gpio", },
{ .compatible = "ingenic,jz4770-gpio", },
{ .compatible = "ingenic,jz4780-gpio", },
{ .compatible = "ingenic,x1000-gpio", },
+ { .compatible = "ingenic,x1830-gpio", },
{},
};
@@ -1981,7 +2256,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
return -ENOMEM;
jzgc->jzpc = jzpc;
- jzgc->reg_base = bank * 0x100;
+ jzgc->reg_base = bank * jzpc->info->reg_offset;
jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
if (!jzgc->gc.label)
@@ -2048,9 +2323,6 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
struct ingenic_pinctrl *jzpc;
struct pinctrl_desc *pctl_desc;
void __iomem *base;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- const struct of_device_id *of_id = of_match_device(
- ingenic_pinctrl_of_match, dev);
const struct ingenic_chip_info *chip_info;
struct device_node *node;
unsigned int i;
@@ -2060,8 +2332,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
if (!jzpc)
return -ENOMEM;
- base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -2073,31 +2344,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
jzpc->dev = dev;
-
- if (of_id)
- jzpc->version = (enum jz_version)of_id->data;
- else
- jzpc->version = (enum jz_version)id->driver_data;
-
- if (jzpc->version >= ID_X1500)
- chip_info = &x1500_chip_info;
- else if (jzpc->version >= ID_X1000E)
- chip_info = &x1000e_chip_info;
- else if (jzpc->version >= ID_X1000)
- chip_info = &x1000_chip_info;
- else if (jzpc->version >= ID_JZ4780)
- chip_info = &jz4780_chip_info;
- else if (jzpc->version >= ID_JZ4770)
- chip_info = &jz4770_chip_info;
- else if (jzpc->version >= ID_JZ4760B)
- chip_info = &jz4760b_chip_info;
- else if (jzpc->version >= ID_JZ4760)
- chip_info = &jz4760_chip_info;
- else if (jzpc->version >= ID_JZ4725B)
- chip_info = &jz4725b_chip_info;
- else
- chip_info = &jz4740_chip_info;
- jzpc->info = chip_info;
+ jzpc->info = chip_info = of_device_get_match_data(dev);
pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
@@ -2166,25 +2413,25 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id ingenic_pinctrl_ids[] = {
- { "jz4740-pinctrl", ID_JZ4740 },
- { "jz4725b-pinctrl", ID_JZ4725B },
- { "jz4760-pinctrl", ID_JZ4760 },
- { "jz4760b-pinctrl", ID_JZ4760B },
- { "jz4770-pinctrl", ID_JZ4770 },
- { "jz4780-pinctrl", ID_JZ4780 },
- { "x1000-pinctrl", ID_X1000 },
- { "x1000e-pinctrl", ID_X1000E },
- { "x1500-pinctrl", ID_X1500 },
+static const struct of_device_id ingenic_pinctrl_of_match[] = {
+ { .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info },
+ { .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info },
+ { .compatible = "ingenic,jz4760-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4760b-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4770-pinctrl", .data = &jz4770_chip_info },
+ { .compatible = "ingenic,jz4780-pinctrl", .data = &jz4780_chip_info },
+ { .compatible = "ingenic,x1000-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1000e-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1500-pinctrl", .data = &x1500_chip_info },
+ { .compatible = "ingenic,x1830-pinctrl", .data = &x1830_chip_info },
{},
};
static struct platform_driver ingenic_pinctrl_driver = {
.driver = {
.name = "pinctrl-ingenic",
- .of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
+ .of_match_table = ingenic_pinctrl_of_match,
},
- .id_table = ingenic_pinctrl_ids,
};
static int __init ingenic_pinctrl_drv_register(void)
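[Sketch, not part of the patch] The match table now carries chip_info pointers directly, so probe reduces to a single of_device_get_match_data() lookup instead of the version-enum ladder removed above. A minimal sketch of the pattern, with hypothetical foo_* names:

	static const struct foo_info foo_v1_info = { /* ... */ };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = &foo_v1_info },
		{ /* sentinel */ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* Returns .data of the matched of_device_id entry, or NULL. */
		const struct foo_info *info = of_device_get_match_data(&pdev->dev);

		if (!info)
			return -ENODEV;
		/* ... */
		return 0;
	}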
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 215db220d795..617585be6a7d 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1229,8 +1229,8 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
pinctrl_add_gpio_range(rza1_pctl->pctl, range);
- dev_info(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
- chip->label, chip->ngpio);
+ dev_dbg(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
+ chip->label, chip->ngpio);
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5d6f9f61ce02..9a8daa256a32 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned long flags;
/*
* While they may not wake up when the TLMM is powered off,
@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (d->parent_data)
irq_chip_set_wake_parent(d, on);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
-
irq_set_irq_wake(pctrl->irq, on);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index e1259ce27396..183f0b2d9f8e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -589,7 +589,7 @@ static const char * const blsp_uart5_groups[] = {
static const char * const qdss_traceclk_a_groups[] = {
"gpio46",
};
-const char * const m_voc_groups[] = {
+static const char * const m_voc_groups[] = {
"gpio123", "gpio124",
};
static const char * const blsp_i2c5_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index d6cfad7417b1..1b6465a882f2 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -456,14 +456,18 @@ enum sc7180_functions {
msm_mux_qspi_data,
msm_mux_qup00,
msm_mux_qup01,
- msm_mux_qup02,
+ msm_mux_qup02_i2c,
+ msm_mux_qup02_uart,
msm_mux_qup03,
- msm_mux_qup04,
+ msm_mux_qup04_i2c,
+ msm_mux_qup04_uart,
msm_mux_qup05,
msm_mux_qup10,
- msm_mux_qup11,
+ msm_mux_qup11_i2c,
+ msm_mux_qup11_uart,
msm_mux_qup12,
- msm_mux_qup13,
+ msm_mux_qup13_i2c,
+ msm_mux_qup13_uart,
msm_mux_qup14,
msm_mux_qup15,
msm_mux_sdc1_tb,
@@ -543,7 +547,10 @@ static const char * const sdc1_tb_groups[] = {
static const char * const sdc2_tb_groups[] = {
"gpio5",
};
-static const char * const qup11_groups[] = {
+static const char * const qup11_i2c_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qup11_uart_groups[] = {
"gpio6", "gpio7",
};
static const char * const ddr_bist_groups[] = {
@@ -593,7 +600,10 @@ static const char * const qdss_groups[] = {
static const char * const pll_reset_groups[] = {
"gpio14",
};
-static const char * const qup02_groups[] = {
+static const char * const qup02_i2c_groups[] = {
+ "gpio15", "gpio16",
+};
+static const char * const qup02_uart_groups[] = {
"gpio15", "gpio16",
};
static const char * const cci_i2c_groups[] = {
@@ -698,7 +708,10 @@ static const char * const wlan1_adc1_groups[] = {
static const char * const atest_usb13_groups[] = {
"gpio44",
};
-static const char * const qup13_groups[] = {
+static const char * const qup13_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+static const char * const qup13_uart_groups[] = {
"gpio46", "gpio47",
};
static const char * const gcc_gp1_groups[] = {
@@ -848,7 +861,10 @@ static const char * const usb_phy_groups[] = {
static const char * const mss_lte_groups[] = {
"gpio108", "gpio109",
};
-static const char * const qup04_groups[] = {
+static const char * const qup04_i2c_groups[] = {
+ "gpio115", "gpio116",
+};
+static const char * const qup04_uart_groups[] = {
"gpio115", "gpio116",
};
@@ -929,14 +945,18 @@ static const struct msm_function sc7180_functions[] = {
FUNCTION(qspi_data),
FUNCTION(qup00),
FUNCTION(qup01),
- FUNCTION(qup02),
+ FUNCTION(qup02_i2c),
+ FUNCTION(qup02_uart),
FUNCTION(qup03),
- FUNCTION(qup04),
+ FUNCTION(qup04_i2c),
+ FUNCTION(qup04_uart),
FUNCTION(qup05),
FUNCTION(qup10),
- FUNCTION(qup11),
+ FUNCTION(qup11_i2c),
+ FUNCTION(qup11_uart),
FUNCTION(qup12),
- FUNCTION(qup13),
+ FUNCTION(qup13_i2c),
+ FUNCTION(qup13_uart),
FUNCTION(qup14),
FUNCTION(qup15),
FUNCTION(sdc1_tb),
@@ -976,8 +996,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[3] = PINGROUP(3, SOUTH, qup01, sp_cmu, dbg_out, qdss_cti, _, _, _, _, _),
[4] = PINGROUP(4, NORTH, sdc1_tb, _, qdss_cti, _, _, _, _, _, _),
[5] = PINGROUP(5, NORTH, sdc2_tb, _, _, _, _, _, _, _, _),
- [6] = PINGROUP(6, NORTH, qup11, qup11, _, _, _, _, _, _, _),
- [7] = PINGROUP(7, NORTH, qup11, qup11, ddr_bist, _, _, _, _, _, _),
+ [6] = PINGROUP(6, NORTH, qup11_i2c, qup11_uart, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, NORTH, qup11_i2c, qup11_uart, ddr_bist, _, _, _, _, _, _),
[8] = PINGROUP(8, NORTH, gp_pdm1, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _),
[9] = PINGROUP(9, NORTH, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _, _),
[10] = PINGROUP(10, NORTH, mdp_vsync, ddr_bist, _, _, _, _, _, _, _),
@@ -985,8 +1005,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, qup01, _, phase_flag, wlan2_adc0, atest_usb10, ddr_pxi3, _),
[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss, _, _, _, _, _, _),
[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss, _, _, _, _, _, _),
- [15] = PINGROUP(15, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
- [16] = PINGROUP(16, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
+ [15] = PINGROUP(15, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
+ [16] = PINGROUP(16, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
[17] = PINGROUP(17, SOUTH, cci_i2c, _, phase_flag, qdss, _, wlan1_adc0, atest_usb12, ddr_pxi1, atest_char),
[18] = PINGROUP(18, SOUTH, cci_i2c, agera_pll, _, phase_flag, qdss, vsense_trigger, ddr_pxi0, atest_char3, _),
[19] = PINGROUP(19, SOUTH, cci_i2c, _, phase_flag, qdss, atest_char2, _, _, _, _),
@@ -1016,8 +1036,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[43] = PINGROUP(43, NORTH, qup12, _, _, _, _, _, _, _, _),
[44] = PINGROUP(44, NORTH, qup12, _, phase_flag, qdss_cti, wlan1_adc1, atest_usb13, ddr_pxi1, _, _),
[45] = PINGROUP(45, NORTH, qup12, qdss_cti, _, _, _, _, _, _, _),
- [46] = PINGROUP(46, NORTH, qup13, qup13, _, _, _, _, _, _, _),
- [47] = PINGROUP(47, NORTH, qup13, qup13, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
[48] = PINGROUP(48, NORTH, gcc_gp1, _, _, _, _, _, _, _, _),
[49] = PINGROUP(49, WEST, mi2s_1, btfm_slimbus, _, _, _, _, _, _, _),
[50] = PINGROUP(50, WEST, mi2s_1, btfm_slimbus, gp_pdm1, _, _, _, _, _, _),
@@ -1085,8 +1105,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[112] = PINGROUP(112, NORTH, _, _, _, _, _, _, _, _, _),
[113] = PINGROUP(113, NORTH, _, _, _, _, _, _, _, _, _),
[114] = PINGROUP(114, NORTH, _, _, _, _, _, _, _, _, _),
- [115] = PINGROUP(115, WEST, qup04, qup04, _, _, _, _, _, _, _),
- [116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
[119] = UFS_RESET(ufs_reset, 0x7f000),
@@ -1099,6 +1119,22 @@ static const struct msm_pingroup sc7180_groups[] = {
[126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sc7180_pdc_map[] = {
+ {0, 40}, {3, 50}, {4, 42}, {5, 70}, {6, 41}, {9, 35},
+ {10, 80}, {11, 51}, {16, 20}, {21, 55}, {22, 90}, {23, 21},
+ {24, 61}, {26, 52}, {28, 36}, {30, 100}, {31, 33}, {32, 81},
+ {33, 62}, {34, 43}, {36, 91}, {37, 53}, {38, 63}, {39, 72},
+ {41, 101}, {42, 7}, {43, 34}, {45, 73}, {47, 82}, {49, 17},
+ {52, 109}, {53, 102}, {55, 92}, {56, 56}, {57, 57}, {58, 83},
+ {59, 37}, {62, 110}, {63, 111}, {64, 74}, {65, 44}, {66, 93},
+ {67, 58}, {68, 112}, {69, 32}, {70, 54}, {72, 59}, {73, 64},
+ {74, 71}, {78, 31}, {82, 30}, {85, 103}, {86, 38}, {87, 39},
+ {88, 45}, {89, 46}, {90, 47}, {91, 48}, {92, 60}, {93, 49},
+ {94, 84}, {95, 94}, {98, 65}, {101, 66}, {104, 67}, {109, 104},
+ {110, 68}, {113, 69}, {114, 113}, {115, 108}, {116, 121},
+ {117, 114}, {118, 119},
+};
+
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.pins = sc7180_pins,
.npins = ARRAY_SIZE(sc7180_pins),
@@ -1109,6 +1145,8 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.ngpios = 120,
.tiles = sc7180_tiles,
.ntiles = ARRAY_SIZE(sc7180_tiles),
+ .wakeirq_map = sc7180_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
};
static int sc7180_pinctrl_probe(struct platform_device *pdev)
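[Sketch, not part of the patch] Each entry in sc7180_pdc_map above pairs a TLMM GPIO number with its PDC wake interrupt. A minimal lookup sketch, assuming msm_gpio_wakeirq_map has gpio/wakeirq fields and using a hypothetical helper name:

	static int sc7180_gpio_to_wakeirq(unsigned int gpio)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(sc7180_pdc_map); i++)
			if (sc7180_pdc_map[i].gpio == gpio)
				return sc7180_pdc_map[i].wakeirq;

		return -ENOENT;	/* this GPIO cannot wake the system via the PDC */
	}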
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 653d1095bfea..fe0be8a6ebb7 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1060,7 +1060,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index dca86886b1f9..fba1d41d20ec 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -439,7 +439,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_gpio",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 3d8b1d74fa2f..681d8dcf37e3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -430,7 +430,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_mpp",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 425fadd6c346..dfd805e76862 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,30 +4,34 @@
#
config PINCTRL_SAMSUNG
bool
+ depends on OF_GPIO
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs"
- depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
+ bool "Pinctrl common driver part for Samsung Exynos SoCs"
+ depends on OF_GPIO
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
config PINCTRL_EXYNOS_ARM
- bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv7-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS_ARM64
- bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv8-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX && OF
+ depends on OF_GPIO
+ depends on ARCH_S3C24XX || COMPILE_TEST
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on ARCH_S3C64XX
+ depends on OF_GPIO
+ depends on ARCH_S3C64XX || COMPILE_TEST
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 28d66e7cb098..cf0e0dc42b84 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,9 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
- select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
@@ -115,8 +116,11 @@ config PINCTRL_PFC_R8A7793
config PINCTRL_PFC_R8A7794
bool "R-Car E2 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7795
- bool "R-Car H3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77950
+ bool "R-Car H3 ES1.x pin control support" if COMPILE_TEST
+
+config PINCTRL_PFC_R8A77951
+ bool "R-Car H3 ES2.0+ pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 3bc05666e1a6..9ebe321d24c4 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -18,8 +18,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7792) += pfc-r8a7792.o
obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77950) += pfc-r8a77950.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77951) += pfc-r8a77951.o
obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 65e52688f091..82209116955b 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include "core.h"
@@ -568,18 +569,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7794_pinmux_info,
},
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7795
+/* Both r8a7795 entries must be present to make sanity checks work */
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
{
.compatible = "renesas,pfc-r8a7795",
- .data = &r8a7795_pinmux_info,
+ .data = &r8a77950_pinmux_info,
},
-#ifdef DEBUG
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
{
- /* For sanity checks only (nothing matches against this) */
- .compatible = "renesas,pfc-r8a77950", /* R-Car H3 ES1.0 */
- .data = &r8a7795es1_pinmux_info,
+ .compatible = "renesas,pfc-r8a7795",
+ .data = &r8a77951_pinmux_info,
},
-#endif /* DEBUG */
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
@@ -886,19 +887,49 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
#endif /* !DEBUG */
+#ifdef CONFIG_OF
+static const void *sh_pfc_quirk_match(void)
+{
+#if defined(CONFIG_PINCTRL_PFC_R8A77950) || \
+ defined(CONFIG_PINCTRL_PFC_R8A77951)
+ const struct soc_device_attribute *match;
+ static const struct soc_device_attribute quirks[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &r8a77950_pinmux_info,
+ },
+ {
+ .soc_id = "r8a7795",
+ .data = &r8a77951_pinmux_info,
+ },
+
+ { /* sentinel */ }
+ };
+
+ match = soc_device_match(quirks);
+ if (match)
+ return match->data ?: ERR_PTR(-ENODEV);
+#endif /* CONFIG_PINCTRL_PFC_R8A77950 || CONFIG_PINCTRL_PFC_R8A77951 */
+
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
- struct device_node *np = pdev->dev.of_node;
-#endif
const struct sh_pfc_soc_info *info;
struct sh_pfc *pfc;
int ret;
#ifdef CONFIG_OF
- if (np)
- info = of_device_get_match_data(&pdev->dev);
- else
+ if (pdev->dev.of_node) {
+ info = sh_pfc_quirk_match();
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!info)
+ info = of_device_get_match_data(&pdev->dev);
+ } else
#endif
info = (const void *)platform_get_device_id(pdev)->driver_data;
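[Sketch, not part of the patch] sh_pfc_quirk_match() folds three outcomes into one pointer via the GNU `?:` extension: a valid info pointer, ERR_PTR(-ENODEV) for a matched-but-unsupported part, and NULL for "no quirk applies". A condensed sketch of how the probe path consumes that tri-state:

	const void *info = sh_pfc_quirk_match();	/* info, ERR_PTR() or NULL */

	if (IS_ERR(info))
		return PTR_ERR(info);		/* quirk matched, SoC unsupported */
	if (!info)
		info = of_device_get_match_data(&pdev->dev);	/* regular OF path */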
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 5a55b8da7919..8213e118aa40 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -386,12 +386,11 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
}
/* Register the function GPIOs chip. */
- if (pfc->info->nr_func_gpios == 0)
- return 0;
-
- chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
+ if (pfc->info->nr_func_gpios) {
+ chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ }
#endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
return 0;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 24866a5958ae..a9875038ed9b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATAG0_A, 0, FN_REMOCON_B, 0,
/* IP0_11_8 [4] */
FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
+ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
FN_PWM4_B, 0, 0, 0,
0, 0, 0, 0,
/* IP0_7_5 [3] */
@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SDAT0_A, 0, 0, 0,
0, 0, 0, 0,
/* IP1_10_8 [3] */
- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
+ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
/* IP1_7_5 [3] */
FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
index ad05da8f6516..04812e62f3a4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES1.x processor support - PFC hardware block.
+ * R8A77950 processor support - PFC hardware block.
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
@@ -5562,8 +5562,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
- u32 *pocctrl)
+static int r8a77950_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
{
int bit = -EINVAL;
@@ -5820,8 +5820,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77950_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -5838,8 +5838,8 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77950_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -5861,15 +5861,15 @@ static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
- .pin_to_pocctrl = r8a7795es1_pin_to_pocctrl,
- .get_bias = r8a7795es1_pinmux_get_bias,
- .set_bias = r8a7795es1_pinmux_set_bias,
+static const struct sh_pfc_soc_operations r8a77950_pinmux_ops = {
+ .pin_to_pocctrl = r8a77950_pin_to_pocctrl,
+ .get_bias = r8a77950_pinmux_get_bias,
+ .set_bias = r8a77950_pinmux_set_bias,
};
-const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
+const struct sh_pfc_soc_info r8a77950_pinmux_info = {
.name = "r8a77950_pfc",
- .ops = &r8a7795es1_pinmux_ops,
+ .ops = &r8a77950_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
index d3145aa135d0..256fab4b03d3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES2.0+ processor support - PFC hardware block.
+ * R8A77951 processor support - PFC hardware block.
*
* Copyright (C) 2015-2019 Renesas Electronics Corporation
*/
@@ -5915,7 +5915,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+static int r8a77951_pin_to_pocctrl(struct sh_pfc *pfc,
+ unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
@@ -6172,8 +6173,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77951_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -6190,8 +6191,8 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77951_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -6213,29 +6214,15 @@ static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct soc_device_attribute r8a7795es1[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { /* sentinel */ }
+static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
+ .pin_to_pocctrl = r8a77951_pin_to_pocctrl,
+ .get_bias = r8a77951_pinmux_get_bias,
+ .set_bias = r8a77951_pinmux_set_bias,
};
-static int r8a7795_pinmux_init(struct sh_pfc *pfc)
-{
- if (soc_device_match(r8a7795es1))
- pfc->info = &r8a7795es1_pinmux_info;
-
- return 0;
-}
-
-static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
- .init = r8a7795_pinmux_init,
- .pin_to_pocctrl = r8a7795_pin_to_pocctrl,
- .get_bias = r8a7795_pinmux_get_bias,
- .set_bias = r8a7795_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info r8a7795_pinmux_info = {
+const struct sh_pfc_soc_info r8a77951_pinmux_info = {
.name = "r8a77951_pfc",
- .ops = &r8a7795_pinmux_ops,
+ .ops = &r8a77951_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 8bdf33c807f6..6616f5210b9d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
+ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
{ PIN_FSCLKST, 20, 2 }, /* FSCLKST */
{ PIN_TMS, 4, 2 }, /* TMS */
} },
@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
- [ 1] = SH_PFC_PIN_NONE,
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
[ 2] = PIN_FSCLKST, /* FSCLKST */
[ 3] = PIN_EXTALR, /* EXTALR */
[ 4] = PIN_TRST_N, /* TRST# */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 4a95867deb8a..908837ea487b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -497,17 +497,15 @@ enum {
SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
CRX0_MARK, CRX1_MARK,
CTX0_MARK, CTX1_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
IERXD_MARK, IETXD_MARK,
- CRX0_CRX1_MARK,
WDTOVF_MARK,
- CRX0X1_MARK,
-
/* DMAC */
TEND0_MARK, DACK0_MARK, DREQ0_MARK,
TEND1_MARK, DACK1_MARK, DREQ1_MARK,
@@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(PJ3_DATA, PJ3MD_00),
PINMUX_DATA(CRX1_MARK, PJ3MD_01),
- PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(CTX1_MARK, PJ2MD_001),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
PINMUX_DATA(CS2_MARK, PJ2MD_011),
PINMUX_DATA(SCK0_MARK, PJ2MD_100),
PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
@@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0),
GPIO_FN(CRX0_CRX1),
@@ -2020,18 +2019,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
- PJ11_IN, PJ11_OUT,
- PJ10_IN, PJ10_OUT,
- PJ9_IN, PJ9_OUT,
- PJ8_IN, PJ8_OUT,
- PJ7_IN, PJ7_OUT,
- PJ6_IN, PJ6_OUT,
- PJ5_IN, PJ5_OUT,
- PJ4_IN, PJ4_OUT,
- PJ3_IN, PJ3_OUT,
- PJ2_IN, PJ2_OUT,
- PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT ))
+ PK11_IN, PK11_OUT,
+ PK10_IN, PK10_OUT,
+ PK9_IN, PK9_OUT,
+ PK8_IN, PK8_OUT,
+ PK7_IN, PK7_OUT,
+ PK6_IN, PK6_OUT,
+ PK5_IN, PK5_OUT,
+ PK4_IN, PK4_OUT,
+ PK3_IN, PK3_OUT,
+ PK2_IN, PK2_OUT,
+ PK1_IN, PK1_OUT,
+ PK0_IN, PK0_OUT ))
},
{}
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 6cbb18ef77dc..d20974a55d93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -737,13 +737,12 @@ enum {
CRX0_MARK, CTX0_MARK,
CRX1_MARK, CTX1_MARK,
CRX2_MARK, CTX2_MARK,
- CRX0_CRX1_MARK,
- CRX0_CRX1_CRX2_MARK,
- CTX0CTX1CTX2_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+ CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
CRX1_PJ22_MARK, CTX1_PJ23_MARK,
CRX2_PJ20_MARK, CTX2_PJ21_MARK,
- CRX0CRX1_PJ22_MARK,
- CRX0CRX1CRX2_PJ20_MARK,
+ CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
+ CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
/* VDC */
DV_CLK_MARK,
@@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CS3_MARK, PC8MD_001),
PINMUX_DATA(TXD7_MARK, PC8MD_010),
PINMUX_DATA(CTX1_MARK, PC8MD_011),
+ PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
PINMUX_DATA(PC7_DATA, PC7MD_000),
PINMUX_DATA(CKE_MARK, PC7MD_001),
@@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CAS_MARK, PC6MD_001),
PINMUX_DATA(SCK7_MARK, PC6MD_010),
PINMUX_DATA(CTX0_MARK, PC6MD_011),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
PINMUX_DATA(PC5_DATA, PC5MD_000),
PINMUX_DATA(RAS_MARK, PC5MD_001),
PINMUX_DATA(CRX0_MARK, PC5MD_011),
- PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
PINMUX_DATA(PC4_DATA, PC4MD_00),
@@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
- PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
- PINMUX_DATA(CRX1_MARK, PJ22MD_101),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+ PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
- PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
- PINMUX_DATA(CRX2_MARK, PJ20MD_101),
- PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+ PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
@@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(WDTOVF),
/* CAN */
+ GPIO_FN(CTX2),
+ GPIO_FN(CRX2),
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
GPIO_FN(CRX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0_CRX1),
+ GPIO_FN(CTX0_CTX1_CTX2),
GPIO_FN(CRX0_CRX1_CRX2),
+ GPIO_FN(CTX2_PJ21),
+ GPIO_FN(CRX2_PJ20),
+ GPIO_FN(CTX1_PJ23),
+ GPIO_FN(CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_PJ23),
+ GPIO_FN(CRX0_CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_CTX2_PJ21),
+ GPIO_FN(CRX0_CRX1_CRX2_PJ20),
/* DMAC */
GPIO_FN(TEND0),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 640d2a4cb838..d57e633e99c0 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -318,8 +318,8 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77950_pinmux_info __weak;
+extern const struct sh_pfc_soc_info r8a77951_pinmux_info __weak;
extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index ec0d34c33903..b0882d120765 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics 2017
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index a78d7b922ef4..31d62bbb7f43 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-sunxi.h"
@@ -549,7 +548,17 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
{
- switch (of_irq_count(pdev->dev.of_node)) {
+ int ret;
+
+ ret = platform_irq_count(pdev);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ switch (ret) {
case 2:
dev_warn(&pdev->dev,
"Your device tree's pinctrl node is broken, which has no IRQ of PG bank routed.\n");
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 692d8b3e2a20..cefbbb8d1a68 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -648,7 +648,7 @@ static int tegra_pinctrl_suspend(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
@@ -666,7 +666,7 @@ static int tegra_pinctrl_resume(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
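[Sketch, not part of the patch] The __iomem annotation lets sparse catch direct dereferences of MMIO pointers, which must go through accessors instead. A minimal sketch, assuming the driver's per-bank regs[] array:

	u32 __iomem *regs = pmx->regs[i];	/* per-bank MMIO base (assumed) */
	u32 val;

	val = readl_relaxed(regs + k);		/* OK: MMIO accessor */
	/* val = regs[k]; */			/* direct dereference: sparse warns */
	writel_relaxed(val, regs + k);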
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
index bb0edf51dfda..5731d1b60e28 100644
--- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
return ret;
}
- if (response->status) {
- dev_err(ec->dev,
- "EC reported failure sending keyboard LEDs command: %d",
- response->status);
- return -EIO;
- }
-
return 0;
}
@@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
{
struct wilco_keyboard_leds_msg request;
struct wilco_keyboard_leds_msg response;
+ int ret;
memset(&request, 0, sizeof(request));
request.command = WILCO_EC_COMMAND_KBBL;
@@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
request.mode = WILCO_KBBL_MODE_FLAG_PWM;
request.percent = brightness;
- return send_kbbl_msg(ec, &request, &response);
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
+ return 0;
}
static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
@@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec)
if (ret < 0)
return ret;
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
return response.percent;
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 9a5c9fd2dbc6..5739a9669b29 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
- spinlock_t spin_lock; /* spin lock */
+ spinlock_t spin_lock[2]; /* Tx/Rx spin locks */
bool is_ready;
};
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
/* Use spin-lock to protect the 'cons->tx_buf'. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
}
}
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
/* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
fifo->vring[is_rx] = NULL;
/* Notify upper layer that packet is done. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}
mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
* worker handler.
*/
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
if (!fifo)
return -ENOMEM;
- spin_lock_init(&fifo->spin_lock);
+ spin_lock_init(&fifo->spin_lock[0]);
+ spin_lock_init(&fifo->spin_lock[1]);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 706207d192ae..77be37a1fbcf 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
+ */
+ ret = regmap_read(priv->regmap, item->capability,
+ &regval);
+ if (ret)
+ goto out;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
+
/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
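[Worked example, not part of the patch] GENMASK(h, l) sets bits h..l, so the capability read above turns "N interrupt-capable components" into an N-bit group mask. Assuming the capability register reports four components:

	u32 count = 4;				/* capability: 4 components present */
	u32 mask = GENMASK(count - 1, 0);	/* GENMASK(3, 0) == 0x0f */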
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 27d5b40fb717..587403c44598 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -997,7 +997,6 @@ config INTEL_SCU_IPC
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
depends on INTEL_SCU_IPC
- default y
---help---
The IPC Util driver provides an interface with the SCU enabling
low level access for debug work and updating the firmware. Say
@@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM
depends on PCI && IOSF_MBI && PM
help
Power-management driver for Intel's Image Signal Processor found on
- Bay and Cherry Trail devices. This dummy driver's sole purpose is to
- turn the ISP off (put it in D3) to save power and to allow entering
- of S0ix modes.
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering of S0ix modes.
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
@@ -1337,6 +1336,17 @@ config PCENGINES_APU2
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of uncore frequency limits on
+ supported server platforms.
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
config SYSTEM76_ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 42d85a00be4e..3747b1f07cf1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index b361c73636a4..6f12747a359a 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 821b08e01635..43bb15e05529 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
#define NOTIFY_KBD_FBM 0x99
+#define NOTIFY_KBD_TTP 0xae
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
@@ -81,6 +82,10 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
#define ASUS_FAN_BOOST_MODES_MASK 0x03
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -198,6 +203,9 @@ struct asus_wmi {
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
@@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus)
{
int ctrl_param = 0;
- /*
- * bits 0-2: level
- * bit 7: light on/off
- */
- if (asus->kbd_led_wk > 0)
- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
@@ -1724,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev,
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->throttle_thermal_policy_available = false;
+
+ err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+ asus->throttle_thermal_policy_available = true;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+ int err;
+ u8 value;
+ u32 retval;
+
+ value = asus->throttle_thermal_policy_mode;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ value, &retval);
+ if (err) {
+ pr_warn("Failed to set throttle thermal policy: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+ u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->throttle_thermal_policy_mode;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result;
+ u8 new_mode;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0)
+ return result;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ return -EINVAL;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ result = throttle_thermal_policy_write(asus);
+ if (result)
+ return result;
+
+ return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
@@ -2005,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
+ if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+ throttle_thermal_policy_switch_next(asus);
+ return;
+ }
+
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
return;
@@ -2155,6 +2263,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
NULL
};
@@ -2178,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2437,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_fan_boost_mode;
+ err = throttle_thermal_policy_check_present(asus);
+ if (err)
+ goto fail_throttle_thermal_policy;
+ else
+ throttle_thermal_policy_set_default(asus);
+
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -2521,6 +2638,7 @@ fail_hwmon:
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
+fail_throttle_thermal_policy:
fail_fan_boost_mode:
fail_platform:
kfree(asus);
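[Sketch, not part of the patch] throttle_thermal_policy_switch_next() wraps mode 2 back to 0 with an explicit compare; the same rotation can be expressed with modulo arithmetic. An equivalent sketch:

	asus->throttle_thermal_policy_mode =
		(asus->throttle_thermal_policy_mode + 1) %
		(ASUS_THROTTLE_THERMAL_POLICY_SILENT + 1);	/* 0 -> 1 -> 2 -> 0 */
	return throttle_thermal_policy_write(asus);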
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index be85ed966bf3..b471b86c28fe 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -16,17 +16,27 @@
#define MAX_SPEED 3
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT 55000
+#define TEMP_LIMIT1_DEFAULT 60000
+#define TEMP_LIMIT2_DEFAULT 65000
+
+#define HYSTERESIS_DEFAULT 3000
+
+#define SPEED_ON_AC_DEFAULT 2
+
+static int temp_limits[3] = {
+ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
module_param_array(temp_limits, int, NULL, 0444);
MODULE_PARM_DESC(temp_limits,
"Millicelsius values above which the fan speed increases");
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
module_param(hysteresis, int, 0444);
MODULE_PARM_DESC(hysteresis,
"Hysteresis in millicelsius before lowering the fan speed");
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
module_param(speed_on_ac, int, 0444);
MODULE_PARM_DESC(speed_on_ac,
"minimum fan speed to allow when system is powered by AC");
@@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
- if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+ if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
temp_limits[i]);
- return -EINVAL;
+ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+ break;
}
}
if (hysteresis < 1000 || hysteresis > 10000) {
dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
hysteresis);
- return -EINVAL;
+ hysteresis = HYSTERESIS_DEFAULT;
}
if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
speed_on_ac);
- return -EINVAL;
+ speed_on_ac = SPEED_ON_AC_DEFAULT;
}
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
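[Sketch, not part of the patch] The probe now warns and falls back to defaults instead of failing on bad module parameters. The same check could be factored into a hypothetical helper:

	/* check_modparam() is hypothetical, mirroring the checks above. */
	static void check_modparam(struct device *dev, const char *name,
				   int *val, int min, int max, int def)
	{
		if (*val < min || *val > max) {
			dev_err(dev, "Invalid %s %d (must be between %d and %d), using default %d\n",
				name, *val, min, max, def);
			*val = def;
		}
	}

	/* e.g.: check_modparam(&pdev->dev, "hysteresis", &hysteresis,
	 *		        1000, 10000, HYSTERESIS_DEFAULT); */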
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ef6d4bd77b1a..43d590250228 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -19,6 +19,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT1051", 0},
{"INT33D5", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
new file mode 100644
index 000000000000..2b1a0734c3f8
--- /dev/null
+++ b/drivers/platform/x86/intel-uncore-frequency.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provide an interface to set MSR 0x620 at per-die granularity. On CPU
+ * online, one control CPU is identified per die to read/write the limits.
+ * If that control CPU goes offline, another online CPU in the die takes
+ * over; when the last CPU in a die goes offline, the sysfs object for that
+ * die is removed. The majority of the code deals with creating the sysfs
+ * objects and their read/write attribute callbacks.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
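+ * @kobj: Sysfs kobject under which this instance's attributes are created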
+ * @stored_uncore_data: Last user-changed MSR 0x620 value; restored on
+ * system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ struct kobject kobj;
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of all the uncore sysfs kobjects */
+static struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct attribute *attr, \
+ char *buf) \
+ { \
+ struct uncore_data *data = to_uncore_data(kobj); \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ data->member_name); \
+ } \
+ define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and read min/max */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
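[Worked example, not part of the patch] A decode of the MSR layout read above (bits 6:0 max ratio, bits 14:8 min ratio, one ratio step = 100 MHz, i.e. 100000 kHz):

	u64 cap = 0x0818;	/* example value of MSR 0x620 */
	unsigned int max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
				/* 0x18 = 24 -> 2400000 kHz (2.4 GHz) */
	unsigned int min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
				/* 0x08 = 8  ->  800000 kHz */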
+
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+ int set_max)
+{
+ int ret;
+ u64 cap;
+
+ mutex_lock(&uncore_lock);
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F) {
+ ret = -EINVAL;
+ goto finish_write;
+ }
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ goto finish_write;
+
+ if (set_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ goto finish_write;
+
+ data->stored_uncore_data = cap;
+
+finish_write:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int input;
+ int ret;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ ret = uncore_write_ratio(data, input, min_max);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf, int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_ratio(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct attribute *attr, \
+ const char *buf, ssize_t count) \
+ { \
+ \
+ return store_min_max_freq_khz(kobj, attr, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct attribute *attr, char *buf) \
+ { \
+ \
+ return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+ &initial_min_freq_khz.attr,
+ &initial_max_freq_khz.attr,
+ &max_freq_khz.attr,
+ &min_freq_khz.attr,
+ NULL
+};
+
+static struct kobj_type uncore_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (!data) {
+ mutex_unlock(&uncore_lock);
+ return;
+ }
+
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ } else {
+ char str[64];
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+ sprintf(str, "package_%02d_die_%02d",
+ topology_physical_package_id(cpu),
+ topology_die_id(cpu));
+
+ uncore_read_ratio(data, &data->initial_min_freq_khz,
+ &data->initial_max_freq_khz);
+
+ ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+ &uncore_root_kobj, str);
+ if (!ret) {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (data) {
+ kobject_put(&data->kobj);
+ data->control_cpu = -1;
+ data->valid = false;
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ int target;
+
+ /* Check if there is an online cpu in the die for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+ uncore_add_die_entry(cpu);
+
+ return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_add_die_entry(target);
+ } else {
+ uncore_remove_die_entry(cpu);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int cpu;
+
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for_each_cpu(cpu, &uncore_cpu_mask) {
+ struct uncore_data *data;
+ int ret;
+
+ data = uncore_get_instance(cpu);
+ if (!data || !data->valid || !data->stored_uncore_data)
+ continue;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ ICPU(INTEL_FAM6_BROADWELL_G),
+ ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_BROADWELL_D),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_D),
+ {}
+};
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+ &cpu_subsys.dev_root->kobj,
+ "intel_uncore_frequency");
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ kobject_put(&uncore_root_kobj);
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i) {
+ if (uncore_instances[i].valid)
+ kobject_put(&uncore_instances[i].kobj);
+ }
+ kobject_put(&uncore_root_kobj);
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
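
The sysfs plumbing above hides the register format, so it is worth spelling out: uncore_read_ratio()/uncore_write_ratio() treat MSR 0x620 (MSR_UNCORE_RATIO_LIMIT) as the max ratio in bits 6:0 and the min ratio in bits 14:8, each unit scaled by UNCORE_FREQ_KHZ_MULTIPLIER (defined earlier in the file; 100000 kHz per unit is assumed in this demo). A minimal userspace C sketch of the same decode/encode, with the raw MSR value invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define RATIO_KHZ_PER_UNIT 100000u /* assumed UNCORE_FREQ_KHZ_MULTIPLIER */

/* Decode min/max uncore limits from a raw MSR 0x620 value. */
static void decode_ratio_limit(uint64_t cap, unsigned *min_khz, unsigned *max_khz)
{
	*max_khz = (cap & 0x7f) * RATIO_KHZ_PER_UNIT;          /* bits 6:0 */
	*min_khz = ((cap >> 8) & 0x7f) * RATIO_KHZ_PER_UNIT;   /* bits 14:8 */
}

/* Encode a new max ratio, leaving the other fields untouched. */
static uint64_t encode_max_ratio(uint64_t cap, unsigned max_khz)
{
	cap &= ~0x7full;
	return cap | (max_khz / RATIO_KHZ_PER_UNIT);
}

int main(void)
{
	unsigned min, max;

	decode_ratio_limit(0x0818, &min, &max); /* example raw value */
	printf("min %u kHz, max %u kHz\n", min, max);
	printf("new cap: %#llx\n",
	       (unsigned long long)encode_max_ratio(0x0818, 2600000));
	return 0;
}
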
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
index b0f421fea2a5..805fc0d8515c 100644
--- a/drivers/platform/x86/intel_atomisp2_pm.c
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
*
* Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
*
@@ -36,8 +36,7 @@
static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
- u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
- ISPSSPM0_IUNIT_POWER_OFF;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
@@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable)
/*
* There should be no IUNIT access while power-down is
- * in progress HW sighting: 4567865
+ * in progress. HW sighting: 4567865.
* Wait up to 50 ms for the IUNIT to shut down.
* And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
- while (1) {
+ do {
u32 tmp;
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (tmp == val)
- break;
+ return 0;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
- enable ? "on" : "off");
- return -EBUSY;
- }
usleep_range(1000, 2000);
- }
+ } while (time_before(jiffies, timeout));
- return 0;
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
}
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
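
The isp_set_power() rework above replaces an open-coded while (1) with the usual poll-with-deadline shape: take a deadline first, test the condition, back off, and only report a timeout once the loop exits. A runnable userspace analogue of that pattern, with clock_gettime() standing in for jiffies, read_state() a caller-supplied stand-in for the ISPSSPM0 read, and the 50 ms budget kept:

#include <stdbool.h>
#include <time.h>

static bool poll_state(unsigned want, unsigned (*read_state)(void))
{
	struct timespec start, now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (read_state() == want)
			return true;	/* reached the requested state */

		/* ~1-2 ms back-off, like usleep_range(1000, 2000) */
		nanosleep(&(struct timespec){ .tv_nsec = 1500 * 1000 }, NULL);

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
	} while (elapsed_ms < 50);	/* 50 ms deadline */

	return false;			/* caller reports -EBUSY */
}
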
diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 2d097fc2dd46..04138215956b 100644
--- a/drivers/platform/x86/intel_cht_int33fe_typec.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -36,30 +36,6 @@ enum {
INT33FE_NODE_MAX,
};
-static const struct software_node nodes[];
-
-static const struct software_node_ref_args pi3usb30532_ref = {
- &nodes[INT33FE_NODE_PI3USB30532]
-};
-
-static const struct software_node_ref_args dp_ref = {
- &nodes[INT33FE_NODE_DISPLAYPORT]
-};
-
-static struct software_node_ref_args mux_ref;
-
-static const struct software_node_reference usb_connector_refs[] = {
- { "orientation-switch", 1, &pi3usb30532_ref},
- { "mode-switch", 1, &pi3usb30532_ref},
- { "displayport", 1, &dp_ref},
- { }
-};
-
-static const struct software_node_reference fusb302_refs[] = {
- { "usb-role-switch", 1, &mux_ref},
- { }
-};
-
/*
 * Grrr I severely dislike buggy BIOSes. At least one BIOS enumerates
* the max17047 both through the INT33FE ACPI device (it is right there
@@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = {
{ }
};
+/*
+ * We are not using inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
static const struct property_entry fusb302_props[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
{ }
};
@@ -112,6 +98,8 @@ static const u32 snk_pdo[] = {
PDO_VAR(5000, 12000, 3000),
};
+static const struct software_node nodes[];
+
static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_STRING("data-role", "dual"),
PROPERTY_ENTRY_STRING("power-role", "dual"),
@@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("mode-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("displayport",
+ &nodes[INT33FE_NODE_DISPLAYPORT]),
{ }
};
static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "fusb302", NULL, fusb302_props },
{ "max17047", NULL, max17047_props },
{ "pi3usb30532" },
{ "displayport" },
- { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { "connector", &nodes[0], usb_connector_props },
{ }
};
@@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
software_node_unregister_nodes(nodes);
- if (mux_ref.node) {
- fwnode_handle_put(software_node_fwnode(mux_ref.node));
- mux_ref.node = NULL;
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(
+ software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
}
if (data->dp) {
@@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
{
+ const struct software_node *mux_ref_node;
int ret;
- ret = software_node_register_nodes(nodes);
- if (ret)
- return ret;
-
- /* The devices that are not created in this driver need extra steps. */
-
/*
* There is no ACPI device node for the USB role mux, so we need to wait
* until the mux driver has created software node for the mux device.
* It means we depend on the mux driver. This function will return
* -EPROBE_DEFER until the mux device is registered.
*/
- mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
- if (!mux_ref.node) {
- ret = -EPROBE_DEFER;
- goto err_remove_nodes;
- }
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update node used in "usb-role-switch" property. Note that we
+ * rely on software_node_register_nodes() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
/*
* The DP connector does have ACPI device node. In this case we can just
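
The int33fe change above drops the separate software_node_reference arrays in favour of PROPERTY_ENTRY_REF()/PROPERTY_ENTRY_REF_ARRAY() entries, with the one runtime-resolved reference (the USB role mux) left as a NULL placeholder that probe fills in before registering the nodes. A small runnable C sketch of that "static table, late-bound pointer" shape, all names invented for illustration:

#include <stdio.h>

struct node { const char *name; };

/* Placeholder reference, patched at probe time before registration,
 * mirroring fusb302_mux_refs[0].node in the driver. */
static struct ref { const struct node *node; } mux_refs[] = {
	{ .node = NULL },
};

static int register_nodes(void)
{
	if (!mux_refs[0].node)
		return -1;	/* would be -EPROBE_DEFER in the driver */
	printf("registered with mux ref -> %s\n", mux_refs[0].node->name);
	return 0;
}

int main(void)
{
	static const struct node mux = { "intel-xhci-usb-sw" };

	mux_refs[0].node = &mux;	/* late binding, then register */
	return register_nodes();
}
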
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 512ad234ad0d..35ed9711c7b9 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010 Intel Corporation
*/
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 292bace83f1e..6f436836fe50 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- ddata = (struct mid_pb_ddata *)id->driver_data;
+ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+ sizeof(*ddata), GFP_KERNEL);
if (!ddata)
- return -ENODATA;
+ return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->irq = irq;
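
The powerbtn fix above matters because id->driver_data points at shared, effectively read-only match data, yet the probe goes on to write ddata->dev and ddata->irq; devm_kmemdup() gives each device its own writable copy, freed automatically on unbind. A runnable userspace analogue, with plain malloc/memcpy standing in for devm_kmemdup() and the struct fields invented:

#include <stdlib.h>
#include <string.h>

struct pb_ddata {
	int irq;
	const char *name;
};

/* Shared template, analogous to the x86_cpu_id driver_data. */
static const struct pb_ddata template = { .irq = -1, .name = "mid_pb" };

static struct pb_ddata *dup_ddata(const struct pb_ddata *src)
{
	struct pb_ddata *copy = malloc(sizeof(*copy));

	if (copy)
		memcpy(copy, src, sizeof(*copy));	/* kmemdup() shape */
	return copy;
}

int main(void)
{
	struct pb_ddata *ddata = dup_ddata(&template);

	if (!ddata)
		return 1;		/* -ENOMEM in the driver */
	ddata->irq = 7;			/* safe: private copy, not the template */
	free(ddata);
	return 0;
}
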
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 571b4754477c..144faa8bad3d 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
};
static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
};
static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
{"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
{"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ spt_pfear_map,
+ NULL
};
static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
};
static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
.ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ cnp_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map icl_pfear_map[] = {
/* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
@@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ /* Tiger Lake and Elkhart Lake generation onwards only */
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
@@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
cnp_slps0_dbg0_map,
cnp_slps0_dbg1_map,
cnp_slps0_dbg2_map,
- NULL,
+ NULL
};
static const struct pmc_bit_map cnp_ltr_show_map[] = {
@@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
};
static const struct pmc_reg_map cnp_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = {
};
static const struct pmc_reg_map icl_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = {
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
- return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
- reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
{
writel(val, pmcdev->regbase + reg_offset);
}
@@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void)
#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;
-static void pmc_core_display_map(struct seq_file *s, int index,
- u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
- index, pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;
iter = pmcdev->map->ppfear0_offset;
@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
return 0;
}
@@ -561,21 +623,22 @@ out_unlock:
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 val, buf_size, fd;
- int err = 0;
+ int err;
buf_size = count < 64 ? count : 64;
- mutex_lock(&pmcdev->lock);
- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&pmcdev->lock);
if (val > map->ltr_ignore_max) {
err = -EINVAL;
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
&pmc_core_dev_state);
- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_fops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
};
/*
* This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS enforces the 24 MHz crystal to shut down
* before PMC can assert SLP_S0#.
*/
static int quirk_xtal_ignore(const struct dmi_system_id *id)
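
The pfear rework above turns pfear_sts from one flat pmc_bit_map array into a NULL-terminated array of such arrays, so platforms can stack the common map (cnp_pfear_map) with a generation-specific tail (icl/tgl) instead of duplicating it. A runnable sketch of walking that two-level, sentinel-terminated layout, with the running ip counter carried across sub-maps as in pmc_core_ppfear_show() (map contents invented):

#include <stdio.h>

struct bit_map { const char *name; unsigned mask; };

static const struct bit_map common[] = { {"PMC", 1}, {"GBE", 2}, {} };
static const struct bit_map tgl[]    = { {"PSF9", 1}, {"TBTLSX", 2}, {} };

/* NULL-terminated list of maps: common entries first, then the
 * platform-specific extension, as in ext_tgl_pfear_map. */
static const struct bit_map *ext_maps[] = { common, tgl, NULL };

int main(void)
{
	int ip = 0;	/* running IP index across all sub-maps */

	for (int idx = 0; ext_maps[idx]; idx++)
		for (int i = 0; ext_maps[idx][i].name; i++, ip++)
			printf("PCH IP: %-2d - %s\n", ip, ext_maps[idx][i].name);
	return 0;
}
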
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index fdee5772e532..f1a0792b3f91 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel Core SoC Power Management Controller Header File
*
@@ -186,6 +186,8 @@ enum ppfear_regs {
#define ICL_NUM_IP_IGN_ALLOWED 20
#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define TGL_NUM_IP_IGN_ALLOWED 22
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -213,7 +215,7 @@ struct pmc_bit_map {
* captures them to have a common implementation.
*/
struct pmc_reg_map {
- const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map **pfear_sts;
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index 6fe829f30997..e1266f5c6359 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 5c1da2bb1435..2433bf73f1ed 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -12,23 +12,13 @@
*/
#include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
#include <asm/intel_pmc_ipc.h>
@@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset)
writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
- return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
static inline u32 ipc_data_readl(u32 offset)
{
return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
@@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset)
}
/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- *data = readl(ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
-/**
* intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
* @offset: offset of GCR register from GCR address base
* @data: data pointer for storing the register output
@@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data)
EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- writel(data, ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
-/**
* intel_pmc_gcr_update() - Update PMC GCR register bits
* @offset: offset of GCR register from GCR address base
* @mask: bit mask for update operation
@@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
*
* Return: negative value on error or 0 on success.
*/
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
{
u32 new_val;
int ret = 0;
@@ -339,7 +265,6 @@ gcr_ipc_unlock:
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
static int update_no_reboot_bit(void *priv, bool set)
{
@@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void)
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
{
int ret;
@@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
/**
* intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
@@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
- u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
{
u32 wbuf[4] = { 0 };
int ret;
@@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
/**
* intel_pmc_ipc_command() - IPC command with input/output data
@@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
}
return (ssize_t)count;
}
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
struct device_attribute *attr,
@@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
int subcmd;
int ret;
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (val)
subcmd = 1;
@@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
}
return (ssize_t)count;
}
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
- NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
- NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
static struct attribute *intel_ipc_attrs[] = {
&dev_attr_northpeak.attr,
@@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = {
.attrs = intel_ipc_attrs,
};
+static const struct attribute_group *intel_ipc_groups[] = {
+ &intel_ipc_group,
+ NULL
+};
+
static struct resource punit_res_array[] = {
/* Punit BIOS */
{
@@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev)
goto err_irq;
}
- ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
- ret);
- goto err_sys;
- }
-
ipcdev.has_gcr_regs = true;
return 0;
-err_sys:
- devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -980,7 +898,6 @@ err_irq:
static int ipc_plat_remove(struct platform_device *pdev)
{
- sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = {
.driver = {
.name = "pmc-ipc-plat",
.acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+ .dev_groups = intel_ipc_groups,
},
};
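
The sysfs change in the pmc_ipc hunks above is easy to miss across the scattered context: instead of pairing sysfs_create_group() in probe with sysfs_remove_group() in remove (plus an error path to undo it), the attribute groups are handed to the driver core via the dev_groups field, which creates and removes them on bind/unbind. A kernel-style fragment consolidating that wiring as it appears in the patch; this restates the pattern for reading, it is not a standalone program:

static struct attribute *intel_ipc_attrs[] = {
	&dev_attr_northpeak.attr,
	&dev_attr_simplecmd.attr,
	NULL
};

static const struct attribute_group intel_ipc_group = {
	.attrs = intel_ipc_attrs,
};

static const struct attribute_group *intel_ipc_groups[] = {
	&intel_ipc_group,
	NULL
};

static struct platform_driver ipc_plat_driver = {
	.driver = {
		.name = "pmc-ipc-plat",
		/* driver core registers/unregisters these groups */
		.dev_groups = intel_ipc_groups,
	},
};
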
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index cdab916fbf92..3d7da5266136 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,11 +26,7 @@
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
-#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
@@ -58,56 +54,29 @@
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-#define PCI_DEVICE_ID_LINCROFT 0x082a
-#define PCI_DEVICE_ID_PENWELL 0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
-#define PCI_DEVICE_ID_TANGIER 0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
- u32 i2c_base;
- u32 i2c_len;
- u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
- .i2c_base = 0xff00d000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
struct intel_scu_ipc_dev {
struct device *dev;
void __iomem *ipc_base;
- void __iomem *i2c_base;
struct completion cmd_complete;
u8 irq_mode;
};
static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
+
/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16 byte buffer for sending and receiving data to and from SCU.
*/
+#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
-#define IPC_I2C_CNTRL_ADDR 0
-#define I2C_DATA_ADDR 0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT (3 * HZ)
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
- if (scu->irq_mode) {
- reinit_completion(&scu->cmd_complete);
- writel(cmd | IPC_IOC, scu->ipc_base);
- }
- writel(cmd, scu->ipc_base);
+ reinit_completion(&scu->cmd_complete);
+ writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
@@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
- writel(data, scu->ipc_base + 0x80 + offset);
+ writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
@@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32
*/
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
- return __raw_readl(scu->ipc_base + 0x04);
+ return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
@@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait until the SCU is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- u32 status = ipc_read_status(scu);
- u32 loop_count = 100000;
+ unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
- /* break if scu doesn't reset busy bit after huge retry */
- while ((status & BIT(0)) && --loop_count) {
- udelay(1); /* scu processing time is in few u secods */
- status = ipc_read_status(scu);
- }
+ do {
+ u32 status;
- if (status & BIT(0)) {
- dev_err(scu->dev, "IPC timed out");
- return -ETIMEDOUT;
- }
+ status = ipc_read_status(scu);
+ if (!(status & IPC_STATUS_BUSY))
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
- if (status & BIT(1))
- return -EIO;
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
- return 0;
+ dev_err(scu->dev, "IPC timed out\n");
+ return -ETIMEDOUT;
}
 /* Wait till the IPC IOC interrupt is received or it times out after 3 seconds */
@@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+ if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
dev_err(scu->dev, "IPC timed out\n");
return -ETIMEDOUT;
}
status = ipc_read_status(scu);
- if (status & BIT(1))
+ if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
@@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
}
/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read byte
+ * intel_scu_ipc_ioread8 - read a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
*
- * Read a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_ioread8(u16 addr, u8 *data)
{
@@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data)
EXPORT_SYMBOL(intel_scu_ipc_ioread8);
/**
- * intel_scu_ipc_ioread16 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read word
- *
- * Read a register pair. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- * intel_scu_ipc_ioread32 - read a dword via the SCU
- * @addr: register on SCU
- * @data: return pointer for read dword
- *
- * Read four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
- * @addr: register on SCU
- * @data: byte to write
+ * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
*
- * Write a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_iowrite8(u16 addr, u8 data)
{
@@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data)
EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
/**
- * intel_scu_ipc_iowrite16 - write a word via the SCU
- * @addr: register on SCU
- * @data: word to write
- *
- * Write two registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv - read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
{
@@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_readv);
/**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
*
- * The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
*
+ * This function may sleep.
*/
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
{
@@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_writev);
/**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write power control unit register. The first data argument
+ * must be the register value and the second the mask value. The mask is a
+ * bitmap that indicates which bits to update: %0 = don't modify this bit,
+ * %1 = modify this bit. Returns %0 on success or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
*/
int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
{
@@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
EXPORT_SYMBOL(intel_scu_ipc_update_register);
/**
- * intel_scu_ipc_simple_command - send a simple command
- * @cmd: command
- * @sub: sub type
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: Command
+ * @sub: Sub type
*
- * Issue a simple command to the SCU. Do not use this interface if
- * you must then access data as any data values may be overwritten
- * by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
*
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
*/
int intel_scu_ipc_simple_command(int cmd, int sub)
{
@@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
EXPORT_SYMBOL(intel_scu_ipc_simple_command);
/**
- * intel_scu_ipc_command - command with data
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in dwords
- * @out: output data
- * @outlein: output length in dwords
- *
- * Issue a command to the SCU which involves data transfers. Do the
- * data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command - command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
*/
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen)
@@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
}
EXPORT_SYMBOL(intel_scu_ipc_command);
-#define IPC_SPTR 0x08
-#define IPC_DPTR 0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in dwords.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- * @sptr: data writing to SPTR register.
- * @dptr: data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
- u32 *out, int outlen, u32 dptr, u32 sptr)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- int inbuflen = DIV_ROUND_UP(inlen, 4);
- u32 inbuf[4];
- int i, err;
-
- /* Up to 16 bytes */
- if (inbuflen > 4)
- return -EINVAL;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
-
- writel(dptr, scu->ipc_base + IPC_DPTR);
- writel(sptr, scu->ipc_base + IPC_SPTR);
-
- /*
- * SRAM controller doesn't support 8-bit writes, it only
- * supports 32-bit writes, so we have to copy input data into
- * the temporary buffer, and SCU FW will use the inlen to
- * determine the actual input data length in the temporary
- * buffer.
- */
- memcpy(inbuf, in, inlen);
-
- for (i = 0; i < inbuflen; i++)
- ipc_data_writel(scu, inbuf[i], 4 * i);
-
- ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
- err = intel_scu_ipc_check_status(scu);
- if (!err) {
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(scu, 4 * i);
- }
-
- mutex_unlock(&ipclock);
- return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ 2 /* I2C Read command */
-
-/**
- * intel_scu_ipc_i2c_cntrl - I2C read/write operations
- * @addr: I2C address + command bits
- * @data: data to read/write
- *
- * Perform an an I2C read/write operation via the SCU. All locking is
- * handled for the caller. This function may sleep.
- *
- * Returns an error code or 0 on success.
- *
- * This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- u32 cmd = 0;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- cmd = (addr >> 24) & 0xFF;
- if (cmd == IPC_I2C_READ) {
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- /* Write not getting updated without delay */
- usleep_range(1000, 2000);
- *data = readl(scu->i2c_base + I2C_DATA_ADDR);
- } else if (cmd == IPC_I2C_WRITE) {
- writel(*data, scu->i2c_base + I2C_DATA_ADDR);
- usleep_range(1000, 2000);
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- } else {
- dev_err(scu->dev,
- "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
- mutex_unlock(&ipclock);
- return -EIO;
- }
- mutex_unlock(&ipclock);
- return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
/*
 * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is set.
 * When the ioc bit is set, the caller API must wait for the interrupt handler
@@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
+ int status = ipc_read_status(scu);
- if (scu->irq_mode)
- complete(&scu->cmd_complete);
+ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+ complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
@@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
struct intel_scu_ipc_dev *scu = &ipcdev;
- struct intel_scu_ipc_pdata_t *pdata;
if (scu->dev) /* We support only one SCU */
return -EBUSY;
- pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
- if (!pdata)
- return -ENODEV;
-
- scu->irq_mode = pdata->irq_mode;
-
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scu->ipc_base = pcim_iomap_table(pdev)[0];
- scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
- if (!scu->i2c_base)
- return -ENOMEM;
-
err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
scu);
if (err)
@@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
}
-#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
static const struct pci_device_id pci_ids[] = {
- SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
{}
};
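
The busy_loop() rewrite above also replaces magic BIT(0)/BIT(1) tests with named IPC_STATUS_* bits and folds the error check into the exit condition: leave as soon as BUSY clears, then report -EIO if ERR is set, and -ETIMEDOUT only if BUSY never cleared. A runnable sketch of that status-word convention, bit positions copied from the IPC_STATUS_* defines above:

#include <errno.h>
#include <stdio.h>

#define STATUS_BUSY (1u << 0)	/* IPC_STATUS_BUSY */
#define STATUS_ERR  (1u << 1)	/* IPC_STATUS_ERR */

/* Map one sampled status word to the driver's return convention. */
static int status_to_errno(unsigned status)
{
	if (status & STATUS_BUSY)
		return -EBUSY;	/* still busy: caller keeps polling */
	return (status & STATUS_ERR) ? -EIO : 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       status_to_errno(STATUS_BUSY),	/* keep waiting */
	       status_to_errno(STATUS_ERR),	/* done, failed */
	       status_to_errno(0));		/* done, ok */
	return 0;
}
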
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
index 3de5a3c66529..0c2aa22c7a12 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
{0x7F, 0x00, 0x0B},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
};
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
@@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
{0xD0, 0x03, 0x08},
{0x7F, 0x02, 0x00},
{0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
};
struct isst_cmd {
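
The isst_if tables above extend an allow-list keyed by command with an inclusive sub-command range ({cmd, sub_min, sub_max}). The matching code sits outside this hunk, so the lookup rule below is an assumption from the field layout: a request passes if some row matches its command and brackets its sub-command. A runnable sketch with the two new rows (0x94/0x95, sub-command 0x03) included:

#include <stdbool.h>
#include <stdio.h>

struct valid_cmd_range { unsigned cmd, sub_min, sub_max; };

static const struct valid_cmd_range valid_cmds[] = {
	{ 0x7F, 0x00, 0x0B },
	{ 0x7F, 0x10, 0x12 },
	{ 0x7F, 0x20, 0x23 },
	{ 0x94, 0x03, 0x03 },	/* new in this change */
	{ 0x95, 0x03, 0x03 },	/* new in this change */
};

static bool cmd_allowed(unsigned cmd, unsigned sub)
{
	for (unsigned i = 0; i < sizeof(valid_cmds) / sizeof(valid_cmds[0]); i++)
		if (valid_cmds[i].cmd == cmd &&
		    sub >= valid_cmds[i].sub_min && sub <= valid_cmds[i].sub_max)
			return true;
	return false;
}

int main(void)
{
	printf("%d %d\n", cmd_allowed(0x95, 0x03), cmd_allowed(0x95, 0x04));
	return 0;
}
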
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index e84d3e983e0c..8e3fb55ac1ae 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
if (err) {
pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
@@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
if (err) {
pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
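
Both verbosity handlers above switch from returning a blanket -EFAULT to propagating kstrtou32_from_user()'s own error, and from overwriting count to returning the telemetry error directly, so userspace can tell a malformed number (-EINVAL/-ERANGE) from a copy failure (-EFAULT). A runnable userspace analogue of the parse-then-propagate shape, with strtoul standing in for the kstrto helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned 32-bit value; return 0 or a negative errno,
 * mirroring kstrtou32()'s contract. */
static int parse_u32(const char *s, unsigned *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 0);
	if (end == s || *end)
		return -EINVAL;
	if (errno == ERANGE || val > 0xffffffffUL)
		return -ERANGE;
	*out = (unsigned)val;
	return 0;
}

int main(void)
{
	unsigned v;
	int err = parse_u32("42", &v);

	if (err)
		return -err;	/* propagate the parser's error, not -EFAULT */
	printf("verbosity %u\n", v);
	return 0;
}
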
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index df8565bad595..c4c742bb23cf 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = {
static int telemetry_pltdrv_probe(struct platform_device *pdev)
{
- struct resource *res0 = NULL, *res1 = NULL;
const struct x86_cpu_id *id;
- int size, ret = -ENOMEM;
+ void __iomem *mem;
+ int ret;
id = x86_match_cpu(telemetry_cpu_ids);
if (!id)
@@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
telm_conf = (struct telemetry_plt_config *)id->driver_data;
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res0) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res0);
- if (!devm_request_mem_region(&pdev->dev, res0->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- telm_conf->pss_config.ssram_base_addr = res0->start;
- telm_conf->pss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res1) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res1);
- if (!devm_request_mem_region(&pdev->dev, res1->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
+ telm_conf->pss_config.regmap = mem;
- telm_conf->ioss_config.ssram_base_addr = res1->start;
- telm_conf->ioss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- telm_conf->pss_config.regmap = ioremap_nocache(
- telm_conf->pss_config.ssram_base_addr,
- telm_conf->pss_config.ssram_size);
- if (!telm_conf->pss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
-
- telm_conf->ioss_config.regmap = ioremap_nocache(
- telm_conf->ioss_config.ssram_base_addr,
- telm_conf->ioss_config.ssram_size);
- if (!telm_conf->ioss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
+ telm_conf->ioss_config.regmap = mem;
mutex_init(&telm_conf->telem_lock);
mutex_init(&telm_conf->telem_trace_lock);
@@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
return 0;
out:
- if (res0)
- release_mem_region(res0->start, resource_size(res0));
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
- if (telm_conf->pss_config.regmap)
- iounmap(telm_conf->pss_config.regmap);
- if (telm_conf->ioss_config.regmap)
- iounmap(telm_conf->ioss_config.regmap);
dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
@@ -1204,9 +1163,6 @@ out:
static int telemetry_pltdrv_remove(struct platform_device *pdev)
{
telemetry_clear_pltdata();
- iounmap(telm_conf->pss_config.regmap);
- iounmap(telm_conf->ioss_config.regmap);
-
return 0;
}
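
The telemetry probe shrink above works because devm_platform_ioremap_resource() bundles platform_get_resource(), devm_request_mem_region() and devm_ioremap_resource() into one devres-managed call; since devres is released by the core after remove() returns, every manual release/iounmap in the error path and in remove could be deleted as well. A kernel-style sketch of the resulting probe shape, function and field names as in the patch, not a standalone program:

static int telemetry_pltdrv_probe(struct platform_device *pdev)
{
	void __iomem *mem;

	/* Map MEM resource 0; unmapped automatically on unbind. */
	mem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	telm_conf->pss_config.regmap = mem;

	mem = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	telm_conf->ioss_config.regmap = mem;

	return 0;
}
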
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 8fe51e43f1bc..c27548fd386a 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -35,6 +35,8 @@
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
@@ -46,6 +48,8 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
@@ -68,6 +72,7 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -85,9 +90,13 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
@@ -96,6 +105,9 @@
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
@@ -112,17 +124,29 @@
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
/* Maximum number of possible physical buses equipped on system */
#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
/* Number of channels in group */
#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
@@ -130,14 +154,16 @@
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
+#define MLXPLAT_CPLD_CH3 18
/* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 3
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
@@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = {
IORESOURCE_IO),
};
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+ {
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+ .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_i2c_ng_items_data,
+ },
+};
+
/* Platform next generation systems i2c data */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+ .items = mlxplat_mlxcpld_i2c_ng_items,
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
@@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
@@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
};
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH3,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+};
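Each LPC-attached mux exposes MLXPLAT_CPLD_GRP_CHNL_NUM (8) channels, which is why the start channels defined earlier are 2, 10 and 18: the three groups tile the adapter number space back to back. A minimal standalone sketch of that numbering (plain C, outside the driver):

#include <stdio.h>

int main(void)
{
	int bases[] = { 2, 10, 18 };	/* MLXPLAT_CPLD_CH1..CH3 */
	int chnls = 8;			/* MLXPLAT_CPLD_GRP_CHNL_NUM */
	int i;

	for (i = 0; i < 3; i++)
		printf("mux %d -> i2c buses %d..%d\n",
		       i, bases[i], bases[i] + chnls - 1);
	return 0;
}

Running this prints the contiguous ranges 2..9, 10..17 and 18..25 claimed by the three mux groups.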
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
{
@@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
},
};
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
/* Platform hotplug default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
{
@@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
},
};
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
@@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+ .items = mlxplat_mlxcpld_comex_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
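The cell/mask and cell_low/mask_low pairs describe a two-level aggregation scheme for the mlxreg-hotplug core: the top-level aggregation register gates whether the low-level carrier register is examined at all. A rough standalone sketch of that gating, inferred from the field names here rather than copied from the hotplug core:

#include <stdio.h>
#include <stdint.h>

/* Illustrative mask values mirroring the defines above. */
#define AGGR_MASK_CARR_DEF	0x06	/* ASIC bit (0x04) | carrier BIT(1) */
#define LOW_AGGRCX_MASK		0xc1

/* Sketch: only look at the low-level carrier register when the
 * top-level aggregation register flags a relevant change. */
static int carrier_event_pending(uint8_t aggr, uint8_t aggr_low)
{
	if (!(aggr & AGGR_MASK_CARR_DEF))
		return 0;
	return (aggr_low & LOW_AGGRCX_MASK) != 0;
}

int main(void)
{
	printf("%d\n", carrier_event_pending(0x02, 0x01));	/* 1 */
	printf("%d\n", carrier_event_pending(0x00, 0x01));	/* 0 */
	return 0;
}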
+
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
{
.label = "pwr1",
@@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+ .items = mlxplat_mlxcpld_ext_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
+/* Platform led for Comex based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+ .data = mlxplat_mlxcpld_comex_100G_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
@@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
@@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.bit = GENMASK(7, 0),
.mode = 0444,
},
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
@@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+ MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
};
@@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
@@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+ mlxplat_mux_data = mlxplat_extended_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_led = &mlxplat_comex_100G_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
@@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_comex_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
- MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+ mlxplat_max_adap_num; i++) {
search_adap = i2c_get_adapter(i);
if (search_adap) {
i2c_put_adapter(search_adap);
@@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
}
/* Return with error if free id for adapter is not found. */
- if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+ if (i == mlxplat_max_adap_num)
return -ENODEV;
/* Shift adapter ids, since expected parent adapter is not free. */
*nr = i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
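The shift logic in mlxplat_mlxcpld_verify_bus_topology() can be traced with concrete numbers: if the preferred parent adapter (1) is busy and the first free adapter id found by the scan is 3, every mux keeps the same relative layout but moves up by 2. A standalone sketch with illustrative values:

#include <stdio.h>

struct mux { int parent; int base_nr; };

int main(void)
{
	/* Default layout: parent bus 1, mux channels start at 2 and 10. */
	struct mux muxes[] = { { 1, 2 }, { 1, 10 } };
	int nr = 3;	/* first free adapter id found by the scan */
	int i, shift;

	for (i = 0; i < 2; i++) {
		shift = nr - muxes[i].parent;	/* 3 - 1 = 2 */
		muxes[i].parent = nr;
		muxes[i].base_nr += shift;	/* 2 -> 4, 10 -> 12 */
	}
	for (i = 0; i < 2; i++)
		printf("parent=%d base_nr=%d\n",
		       muxes[i].parent, muxes[i].base_nr);
	return 0;
}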
@@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void)
if (nr < 0)
goto fail_alloc;
- nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
priv->pdev_i2c = platform_device_register_resndata(
@@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void)
goto fail_alloc;
}
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
&priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL,
@@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_led);
platform_device_unregister(priv->pdev_hotplug);
- for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 52ef1419b671..3e3c66dfec2e 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -489,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
pmc->base_addr &= PMC_BASE_ADDR_MASK;
- pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+ pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
if (!pmc->regmap) {
dev_err(&pdev->dev, "error: ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 9b6a93ff41ff..23e40aa2176e 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
int ret = 0;
int i;
- samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ samsung->f0000_segment = ioremap(0xf0000, 0xffff);
if (!samsung->f0000_segment) {
if (debug || force)
pr_err("Can't map the segment at 0xf0000\n");
@@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
if (debug)
samsung_sabi_infos(samsung, loca, ifaceP);
- samsung->sabi_iface = ioremap_nocache(ifaceP, 16);
+ samsung->sabi_iface = ioremap(ifaceP, 16);
if (!samsung->sabi_iface) {
pr_err("Can't remap %x\n", ifaceP);
ret = -EINVAL;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 72205771d03d..93177e6e5ecd 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-gp-electronic-t701.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
{ }
};
@@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-onda-v891w-v1.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v891w-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-pipo-w2s.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
{ }
};
@@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = {
.properties = pipo_w2s_props,
};
+static const struct property_entry pipo_w11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w11_props,
+};
+
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-teclast_x98plus2.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = {
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c13.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
@@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Pipo W11 */
+ .driver_data = (void *)&pipo_w11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ /* Above matches are too generic, add bios-ver match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+ },
+ },
+ {
/* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
@@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "SurfTab wintron 7.0 ST70416-6"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
},
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
}
static int ts_dmi_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
+ unsigned long action, void *data)
{
struct device *dev = data;
struct i2c_client *client;
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 179b737280e1..c43d8ad02529 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -747,34 +747,12 @@ __skip:
}
/*
- * Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
- int i, j;
- unsigned char checksum = 0x6a, bit, b;
-
- for (i = 0; i < 8; i++) {
- b = data[i];
- for (j = 0; j < 8; j++) {
- bit = 0;
- if (b & (1 << j))
- bit = 1;
- checksum =
- ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
- | (checksum >> 1);
- }
- }
- return checksum;
-}
-
-/*
* Build device list for all present ISA PnP devices.
*/
static int __init isapnp_build_device_list(void)
{
int csn;
- unsigned char header[9], checksum;
+ unsigned char header[9];
struct pnp_card *card;
u32 eisa_id;
char id[8];
@@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void)
for (csn = 1; csn <= isapnp_csn_count; csn++) {
isapnp_wake(csn);
isapnp_peek(header, 9);
- checksum = isapnp_checksum(header);
eisa_id = header[0] | header[1] << 8 |
header[2] << 16 | header[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 089b6244b716..b8fe166cd0d9 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -12,6 +12,22 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on POWER_AVS
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates the CPU OPP tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index a1b8cd453f19..9007d05853e2 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..9192fb747653
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
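cpr_masked_write() is a plain read-modify-write: bits outside the mask are preserved, bits inside are replaced. A standalone example with made-up register contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0xf0f0;	/* current register contents (assumed) */
	uint32_t mask = 0x0ff0;	/* field to update */
	uint32_t value = 0x0a50;	/* new field value */

	reg = (reg & ~mask) | (value & mask);	/* read-modify-write */
	printf("reg = %#x\n", reg);		/* 0xfa50 */
	return 0;
}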
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f == end)
+ return;
+
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
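cpr_set_acc() walks every fuse corner strictly between the previous corner and the destination, destination inclusive, in whichever direction the change goes. A standalone sketch using integer corner indices instead of regmap writes:

#include <stdio.h>

/* Stand-in for regmap_multi_reg_write(): just record which corner's
 * ACC settings would be programmed. */
static void apply_acc(int corner)
{
	printf("apply ACC for corner %d\n", corner);
}

static void set_acc(int from, int to)
{
	if (from == to)
		return;
	if (from < to)
		for (from += 1; from <= to; from++)
			apply_acc(from);
	else
		for (from -= 1; from >= to; from--)
			apply_acc(from);
}

int main(void)
{
	set_acc(0, 2);	/* going up: applies corners 1, 2 */
	set_acc(2, 0);	/* going down: applies corners 1, 0 */
	return 0;
}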
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else if (dir == DOWN) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (dir == DOWN) {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
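Worked numbers for the scaling arithmetic above, with a hypothetical 12.5 mV regulator step and an up-limit of two steps (both values are assumptions chosen for the example, not taken from any particular SoC):

#include <stdio.h>

int main(void)
{
	int step_uV = 12500;	/* hypothetical regulator linear step */
	int up_limit = 2;	/* desc->vdd_apc_step_up_limit (assumed) */
	int error_steps = 3;	/* as read back from RBCPR_RESULT_0 */
	int last_uV = 1050000, max_uV = 1075000;
	int new_uV;

	if (error_steps > up_limit)
		error_steps = up_limit;			/* clamped to 2 */
	new_uV = last_uV + error_steps * step_uV;	/* 1075000 */
	if (new_uV > max_uV)
		new_uV = max_uV;			/* hit the ceiling */
	printf("new_uV = %d\n", new_uV);
	return 0;
}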
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+ * Handle the IRQ status bits below in priority order
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+ * Determine the new corner we're switching to.
+ * Subtract one, since the lowest performance state is 1.
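+ * For example, state 1 selects drv->corners[0].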
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ char *ret;
+ int i;
+
+ *data = 0;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) != -EPROBE_DEFER)
+ dev_err(dev, "undefined cell %s\n", cname);
+ return PTR_ERR(cell);
+ }
+
+ ret = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(ret)) {
+ dev_err(dev, "can't read cell %s\n", cname);
+ return PTR_ERR(ret);
+ }
+
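+ /*
+ * Assemble the cell bytes little-endian; e.g. (illustrative)
+ * bytes {0x34, 0x12} yield *data = 0x1234.
+ */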
+ for (i = 0; i < len; i++)
+ *data |= ret[i] << (8 * i);
+
+ kfree(ret);
+ dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+ return 0;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+ &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+ /* Not two's complement; instead, the highest bit is the sign bit */
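+ /*
+ * e.g. with a width of 6 (as on qcs404): bits 0b100011 encode
+ * magnitude 3 with the sign bit set, so steps becomes -3.
+ */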
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+ * Update SoC voltages: platforms might choose a different
+ * regulator than the one used to characterize the algorithm
+ * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per-corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ do_div(temp, f_high - f_low);
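+ /*
+ * e.g. (values illustrative): f_diff = 100 MHz, uV_high - uV_low =
+ * 100000 uV and f_high - f_low = 200 MHz give temp = 50000 uV.
+ */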
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert Hz to MHz.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
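+/*
+ * Find the CPU OPP whose required-opps phandle references the given
+ * CPR OPP and return its opp-hz. A hypothetical DT fragment (node
+ * name and phandle are illustrative; "opp-hz" and "required-opps"
+ * are the standard properties this walk relies on):
+ *
+ *	opp-1094400000 {
+ *		opp-hz = /bits/ 64 <1094400000>;
+ *		required-opps = <&cpr_opp1>;
+ *	};
+ */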
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
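+ * Hypothetical numbers (for illustration only): QUOT(corner_N) =
+ * 1400, QUOT(corner_N-1) = 1200, freq(corner_N) = 1400 MHz and
+ * freq(corner_N-1) = 1200 MHz give scaling = min(1000 * 200 / 200,
+ * max_factor) = 1000, so a corner running 100 MHz below freq_max
+ * gets quot_adjust = 100 * 1000 / 1000 = 100.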
+ */
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
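+ /*
+ * NVMEM cell names are indexed from 1; e.g. the first fuse corner
+ * uses "cpr_ring_osc1", "cpr_init_voltage1", "cpr_quotient1" and
+ * "cpr_quotient_offset1".
+ */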
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+ * cpufreq_online() will change to the OPP with the lowest
+ * frequency at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
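+ * For example (frequencies assumed): with corners at 1.1, 1.4 and
+ * 1.8 GHz and a bootloader-set rate of 1.6 GHz, the loop below
+ * selects the 1.4 GHz corner.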
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction (whether we are
+ * changing to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner() that gets the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+ * here, when attaching to the power domain, because it requires
+ * the maximum frequency of each fuse corner, and that is only
+ * known once the cpufreq driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+ * Initialize the fuse corners first, since they depend
+ * only on data in efuses.
+ * Everything related to (virtual) corners has to be
+ * initialized after attaching to the power domain,
+ * since it depends on the CPU's OPP table.
+ */
+ ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 2e5b6a6834da..73257cf107d9 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index b0d1b8d264fa..475c60dccaa4 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -56,20 +56,6 @@ config PTP_1588_CLOCK_QORIQ
To compile this driver as a module, choose M here: the module
will be called ptp-qoriq.
-config PTP_1588_CLOCK_IXP46X
- tristate "Intel IXP46x as PTP clock"
- depends on IXP4XX_ETH
- depends on PTP_1588_CLOCK
- default y
- help
- This driver adds support for using the IXP46X as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
-
- To compile this driver as a module, choose M here: the module
- will be called ptp_ixp46x.
-
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
@@ -89,6 +75,16 @@ config DP83640_PHY
In order for this to work, your MAC driver must also
implement the skb_tx_timestamp() function.
+config PTP_1588_CLOCK_INES
+ tristate "ZHAW InES PTP time stamping IP core"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on PHYLIB
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the ZHAW InES 1588 IP
+ core. This clock is only useful if the MII bus of your MAC
+ is wired up to the core.
+
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 69a06f86a450..8c830336f178 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -6,10 +6,10 @@
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
-obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
+obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
-obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index 9263bc33b8f4..69eedda9f731 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -77,6 +77,8 @@
#define JTAG_DEVICE_ID 0x001c
#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
#define STATUS 0xc03c
#define USER_GPIO0_TO_7_STATUS 0x008a
#define USER_GPIO8_TO_15_STATUS 0x008b
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 61fafe0374ce..ac1f2bf9e888 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
{
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -302,9 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- ptp_cleanup_pin_groups(ptp);
-
posix_clock_unregister(&ptp->clock);
+
return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
@@ -368,6 +368,12 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
}
EXPORT_SYMBOL(ptp_schedule_worker);
+void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+}
+EXPORT_SYMBOL(ptp_cancel_worker_sync);
+
/* module operations */
static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index a5110b7b4ece..032e112c3dd9 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -405,6 +405,7 @@ static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
if (idtcm->calculate_overhead_flag) {
+ /* Assumption: I2C @ 400KHz */
total_overhead_ns = ktime_to_ns(ktime_get_raw()
- idtcm->start_time)
+ idtcm->tod_write_overhead_ns
@@ -596,44 +597,7 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- HW_REV_ID,
- hw_rev_id,
- sizeof(u8));
-}
-
-static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
-{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- BOND_ID,
- bond_id,
- sizeof(u8));
-}
-
-static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
-
- *hw_csr_id = (buf[1] << 8) | buf[0];
-
- return err;
-}
-
-static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
-
- *hw_irq_id = (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, HW_REVISION, REV_ID, hw_rev_id, sizeof(u8));
}
static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
@@ -674,20 +638,11 @@ static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
sizeof(u8));
}
-static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
+ u8 *config_select)
{
- int err;
- u8 buf[4] = {0};
-
- err = idtcm_read(idtcm,
- GENERAL_STATUS,
- PIPELINE_ID,
- &buf[0],
- sizeof(buf));
-
- *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, GENERAL_STATUS, OTP_SCSR_CONFIG_SELECT,
+ config_select, sizeof(u8));
}
static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
@@ -1078,31 +1033,25 @@ static void idtcm_display_version_info(struct idtcm *idtcm)
u8 major;
u8 minor;
u8 hotfix;
- u32 pipeline;
u16 product_id;
- u16 csr_id;
- u16 irq_id;
u8 hw_rev_id;
- u8 bond_id;
+ u8 config_select;
+ char *fmt = "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d\n";
idtcm_read_major_release(idtcm, &major);
idtcm_read_minor_release(idtcm, &minor);
idtcm_read_hotfix_release(idtcm, &hotfix);
- idtcm_read_pipeline(idtcm, &pipeline);
idtcm_read_product_id(idtcm, &product_id);
idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
- idtcm_read_bond_id(idtcm, &bond_id);
- idtcm_read_hw_csr_id(idtcm, &csr_id);
- idtcm_read_hw_irq_id(idtcm, &irq_id);
-
- dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u\t"
- "0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
- major, minor, hotfix, pipeline,
- product_id, hw_rev_id, bond_id, csr_id, irq_id);
+
+ idtcm_read_otp_scsr_config_select(idtcm, &config_select);
+
+ dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
+ product_id, hw_rev_id, config_select);
}
-static struct ptp_clock_info idtcm_caps = {
+static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 1,
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
new file mode 100644
index 000000000000..dfda54cbd866
--- /dev/null
+++ b/drivers/ptp/ptp_ines.c
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 MOSER-BAER AG
+//
+
+#define pr_fmt(fmt) "InES_PTP: " fmt
+
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include <linux/if_vlan.h>
+#include <linux/mii_timestamper.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/stddef.h>
+
+MODULE_DESCRIPTION("Driver for the ZHAW InES PTP time stamping IP core");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* GLOBAL register */
+#define MCAST_MAC_SELECT_SHIFT 2
+#define MCAST_MAC_SELECT_MASK 0x3
+#define IO_RESET BIT(1)
+#define PTP_RESET BIT(0)
+
+/* VERSION register */
+#define IF_MAJOR_VER_SHIFT 12
+#define IF_MAJOR_VER_MASK 0xf
+#define IF_MINOR_VER_SHIFT 8
+#define IF_MINOR_VER_MASK 0xf
+#define FPGA_MAJOR_VER_SHIFT 4
+#define FPGA_MAJOR_VER_MASK 0xf
+#define FPGA_MINOR_VER_SHIFT 0
+#define FPGA_MINOR_VER_MASK 0xf
+
+/* INT_STAT register */
+#define RX_INTR_STATUS_3 BIT(5)
+#define RX_INTR_STATUS_2 BIT(4)
+#define RX_INTR_STATUS_1 BIT(3)
+#define TX_INTR_STATUS_3 BIT(2)
+#define TX_INTR_STATUS_2 BIT(1)
+#define TX_INTR_STATUS_1 BIT(0)
+
+/* INT_MSK register */
+#define RX_INTR_MASK_3 BIT(5)
+#define RX_INTR_MASK_2 BIT(4)
+#define RX_INTR_MASK_1 BIT(3)
+#define TX_INTR_MASK_3 BIT(2)
+#define TX_INTR_MASK_2 BIT(1)
+#define TX_INTR_MASK_1 BIT(0)
+
+/* BUF_STAT register */
+#define RX_FIFO_NE_3 BIT(5)
+#define RX_FIFO_NE_2 BIT(4)
+#define RX_FIFO_NE_1 BIT(3)
+#define TX_FIFO_NE_3 BIT(2)
+#define TX_FIFO_NE_2 BIT(1)
+#define TX_FIFO_NE_1 BIT(0)
+
+/* PORT_CONF register */
+#define CM_ONE_STEP BIT(6)
+#define PHY_SPEED_SHIFT 4
+#define PHY_SPEED_MASK 0x3
+#define P2P_DELAY_WR_POS_SHIFT 2
+#define P2P_DELAY_WR_POS_MASK 0x3
+#define PTP_MODE_SHIFT 0
+#define PTP_MODE_MASK 0x3
+
+/* TS_STAT_TX register */
+#define TS_ENABLE BIT(15)
+#define DATA_READ_POS_SHIFT 8
+#define DATA_READ_POS_MASK 0x1f
+#define DISCARDED_EVENTS_SHIFT 4
+#define DISCARDED_EVENTS_MASK 0xf
+
+#define INES_N_PORTS 3
+#define INES_REGISTER_SIZE 0x80
+#define INES_PORT_OFFSET 0x20
+#define INES_PORT_SIZE 0x20
+#define INES_FIFO_DEPTH 90
+#define INES_MAX_EVENTS 100
+
+#define BC_PTP_V1 0
+#define BC_PTP_V2 1
+#define TC_E2E_PTP_V2 2
+#define TC_P2P_PTP_V2 3
+
+#define OFF_PTP_CLOCK_ID 20
+#define OFF_PTP_PORT_NUM 28
+
+#define PHY_SPEED_10 0
+#define PHY_SPEED_100 1
+#define PHY_SPEED_1000 2
+
+#define PORT_CONF \
+ ((PHY_SPEED_1000 << PHY_SPEED_SHIFT) | (BC_PTP_V2 << PTP_MODE_SHIFT))
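+/* i.e. (PHY_SPEED_1000 << 4) | (BC_PTP_V2 << 0) = 0x21 */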
+
+#define ines_read32(s, r) __raw_readl((void __iomem *)&s->regs->r)
+#define ines_write32(s, v, r) __raw_writel(v, (void __iomem *)&s->regs->r)
+
+#define MESSAGE_TYPE_SYNC 1
+#define MESSAGE_TYPE_P_DELAY_REQ 2
+#define MESSAGE_TYPE_P_DELAY_RESP 3
+#define MESSAGE_TYPE_DELAY_REQ 4
+
+#define SYNC 0x0
+#define DELAY_REQ 0x1
+#define PDELAY_REQ 0x2
+#define PDELAY_RESP 0x3
+
+static LIST_HEAD(ines_clocks);
+static DEFINE_MUTEX(ines_clocks_lock);
+
+struct ines_global_regs {
+ u32 id;
+ u32 test;
+ u32 global;
+ u32 version;
+ u32 test2;
+ u32 int_stat;
+ u32 int_msk;
+ u32 buf_stat;
+};
+
+struct ines_port_registers {
+ u32 port_conf;
+ u32 p_delay;
+ u32 ts_stat_tx;
+ u32 ts_stat_rx;
+ u32 ts_tx;
+ u32 ts_rx;
+};
+
+struct ines_timestamp {
+ struct list_head list;
+ unsigned long tmo;
+ u16 tag;
+ u64 sec;
+ u64 nsec;
+ u64 clkid;
+ u16 portnum;
+ u16 seqid;
+};
+
+struct ines_port {
+ struct ines_port_registers *regs;
+ struct mii_timestamper mii_ts;
+ struct ines_clock *clock;
+ bool rxts_enabled;
+ bool txts_enabled;
+ unsigned int index;
+ struct delayed_work ts_work;
+ /* lock protects event list and tx_skb */
+ spinlock_t lock;
+ struct sk_buff *tx_skb;
+ struct list_head events;
+ struct list_head pool;
+ struct ines_timestamp pool_data[INES_MAX_EVENTS];
+};
+
+struct ines_clock {
+ struct ines_port port[INES_N_PORTS];
+ struct ines_global_regs __iomem *regs;
+ void __iomem *base;
+ struct device_node *node;
+ struct device *dev;
+ struct list_head list;
+};
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev);
+static int ines_rxfifo_read(struct ines_port *port);
+static u64 ines_rxts64(struct ines_port *port, unsigned int words);
+static bool ines_timestamp_expired(struct ines_timestamp *ts);
+static u64 ines_txts64(struct ines_port *port, unsigned int words);
+static void ines_txtstamp_work(struct work_struct *work);
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type);
+static u8 tag_to_msgtype(u8 tag);
+
+static void ines_clock_cleanup(struct ines_clock *clock)
+{
+ struct ines_port *port;
+ int i;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ cancel_delayed_work_sync(&port->ts_work);
+ }
+}
+
+static int ines_clock_init(struct ines_clock *clock, struct device *device,
+ void __iomem *addr)
+{
+ struct device_node *node = device->of_node;
+ unsigned long port_addr;
+ struct ines_port *port;
+ int i, j;
+
+ INIT_LIST_HEAD(&clock->list);
+ clock->node = node;
+ clock->dev = device;
+ clock->base = addr;
+ clock->regs = clock->base;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ port_addr = (unsigned long) clock->base +
+ INES_PORT_OFFSET + i * INES_PORT_SIZE;
+ port->regs = (struct ines_port_registers *) port_addr;
+ port->clock = clock;
+ port->index = i;
+ INIT_DELAYED_WORK(&port->ts_work, ines_txtstamp_work);
+ spin_lock_init(&port->lock);
+ INIT_LIST_HEAD(&port->events);
+ INIT_LIST_HEAD(&port->pool);
+ for (j = 0; j < INES_MAX_EVENTS; j++)
+ list_add(&port->pool_data[j].list, &port->pool);
+ }
+
+ ines_write32(clock, 0xBEEF, test);
+ ines_write32(clock, 0xBEEF, test2);
+
+ dev_dbg(device, "ID 0x%x\n", ines_read32(clock, id));
+ dev_dbg(device, "TEST 0x%x\n", ines_read32(clock, test));
+ dev_dbg(device, "VERSION 0x%x\n", ines_read32(clock, version));
+ dev_dbg(device, "TEST2 0x%x\n", ines_read32(clock, test2));
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ ines_write32(port, PORT_CONF, port_conf);
+ }
+
+ return 0;
+}
+
+static struct ines_port *ines_find_port(struct device_node *node, u32 index)
+{
+ struct ines_port *port = NULL;
+ struct ines_clock *clock;
+ struct list_head *this;
+
+ mutex_lock(&ines_clocks_lock);
+ list_for_each(this, &ines_clocks) {
+ clock = list_entry(this, struct ines_clock, list);
+ if (clock->node == node) {
+ port = &clock->port[index];
+ break;
+ }
+ }
+ mutex_unlock(&ines_clocks_lock);
+ return port;
+}
+
+static u64 ines_find_rxts(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ struct list_head *this, *next;
+ struct ines_timestamp *ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ if (type == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ines_rxfifo_read(port);
+ list_for_each_safe(this, next, &port->events) {
+ ts = list_entry(this, struct ines_timestamp, list);
+ if (ines_timestamp_expired(ts)) {
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ continue;
+ }
+ if (ines_match(skb, type, ts, port->clock->dev)) {
+ ns = ts->sec * 1000000000ULL + ts->nsec;
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ns;
+}
+
+static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb), i;
+ u32 data_rd_pos, buf_stat, mask, ts_stat_tx;
+ struct ines_timestamp ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ mask = TX_FIFO_NE_1 << port->index;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask)) {
+ dev_dbg(port->clock->dev,
+ "Tx timestamp FIFO unexpectedly empty\n");
+ break;
+ }
+ ts_stat_tx = ines_read32(port, ts_stat_tx);
+ data_rd_pos = (ts_stat_tx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev,
+ "unexpected Tx read pos %u\n", data_rd_pos);
+ break;
+ }
+
+ ts.tag = ines_read32(port, ts_tx);
+ ts.sec = ines_txts64(port, 3);
+ ts.nsec = ines_txts64(port, 2);
+ ts.clkid = ines_txts64(port, 4);
+ ts.portnum = ines_read32(port, ts_tx);
+ ts.seqid = ines_read32(port, ts_tx);
+
+ if (ines_match(skb, class, &ts, port->clock->dev)) {
+ ns = ts.sec * 1000000000ULL + ts.nsec;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ns;
+}
+
+static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
+ struct hwtstamp_config cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ts_stat_tx = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ ts_stat_tx = TS_ENABLE;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ts_stat_tx = TS_ENABLE;
+ cm_one_step = CM_ONE_STEP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ts_stat_rx = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ts_stat_rx = TS_ENABLE;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~CM_ONE_STEP;
+ port_conf |= cm_one_step;
+
+ ines_write32(port, port_conf, port_conf);
+ ines_write32(port, ts_stat_rx, ts_stat_rx);
+ ines_write32(port, ts_stat_tx, ts_stat_tx);
+
+ port->rxts_enabled = ts_stat_rx == TS_ENABLE;
+ port->txts_enabled = ts_stat_tx == TS_ENABLE;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void ines_link_state(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 port_conf, speed_conf;
+ unsigned long flags;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_conf = PHY_SPEED_10 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ speed_conf = PHY_SPEED_100 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_1000:
+ speed_conf = PHY_SPEED_1000 << PHY_SPEED_SHIFT;
+ break;
+ default:
+ dev_err(port->clock->dev, "bad speed: %d\n", phydev->speed);
+ return;
+ }
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~(0x3 << PHY_SPEED_SHIFT);
+ port_conf |= speed_conf;
+
+ ines_write32(port, port_conf, port_conf);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev)
+{
+ u8 *msgtype, *data = skb_mac_header(skb);
+ unsigned int offset = 0;
+ __be16 *portn, *seqid;
+ __be64 *clkid;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ return false;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return false;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return false;
+
+ msgtype = data + offset;
+ clkid = (__be64 *)(data + offset + OFF_PTP_CLOCK_ID);
+ portn = (__be16 *)(data + offset + OFF_PTP_PORT_NUM);
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ if (tag_to_msgtype(ts->tag & 0x7) != (*msgtype & 0xf)) {
+ dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
+ tag_to_msgtype(ts->tag & 0x7), *msgtype & 0xf);
+ return false;
+ }
+ if (cpu_to_be64(ts->clkid) != *clkid) {
+ dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
+ cpu_to_be64(ts->clkid), *clkid);
+ return false;
+ }
+ if (ts->portnum != ntohs(*portn)) {
+ dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
+ ts->portnum, ntohs(*portn));
+ return false;
+ }
+ if (ts->seqid != ntohs(*seqid)) {
+ dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
+ ts->seqid, ntohs(*seqid));
+ return false;
+ }
+
+ return true;
+}
+
+static bool ines_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (!port->rxts_enabled)
+ return false;
+
+ ns = ines_find_rxts(port, skb, type);
+ if (!ns)
+ return false;
+
+ ssh = skb_hwtstamps(skb);
+ ssh->hwtstamp = ns_to_ktime(ns);
+ netif_rx(skb);
+
+ return true;
+}
+
+static int ines_rxfifo_read(struct ines_port *port)
+{
+ u32 data_rd_pos, buf_stat, mask, ts_stat_rx;
+ struct ines_timestamp *ts;
+ unsigned int i;
+
+ mask = RX_FIFO_NE_1 << port->index;
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+ if (list_empty(&port->pool)) {
+ dev_err(port->clock->dev, "event pool is empty\n");
+ return -1;
+ }
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask))
+ break;
+
+ ts_stat_rx = ines_read32(port, ts_stat_rx);
+ data_rd_pos = (ts_stat_rx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev, "unexpected Rx read pos %u\n",
+ data_rd_pos);
+ break;
+ }
+
+ ts = list_first_entry(&port->pool, struct ines_timestamp, list);
+ ts->tmo = jiffies + HZ;
+ ts->tag = ines_read32(port, ts_rx);
+ ts->sec = ines_rxts64(port, 3);
+ ts->nsec = ines_rxts64(port, 2);
+ ts->clkid = ines_rxts64(port, 4);
+ ts->portnum = ines_read32(port, ts_rx);
+ ts->seqid = ines_read32(port, ts_rx);
+
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &port->events);
+ }
+
+ return 0;
+}
+
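+/*
+ * Assemble a timestamp field from consecutive 16-bit FIFO words, most
+ * significant word first; e.g. (illustrative) words 0x0001 then 0x86a0
+ * yield 0x000186a0 = 100000. The same layout applies to the Tx FIFO.
+ */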
+static u64 ines_rxts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_rx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_rx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_timestamp_expired(struct ines_timestamp *ts)
+{
+ return time_after(jiffies, ts->tmo);
+}
+
+static int ines_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_P2P);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static u64 ines_txts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_tx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_tx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_txts_onestep(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ unsigned long flags;
+ u32 port_conf;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port_conf = ines_read32(port, port_conf);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port_conf & CM_ONE_STEP)
+ return is_sync_pdelay_resp(skb, type);
+
+ return false;
+}
+
+static void ines_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct sk_buff *old_skb = NULL;
+ unsigned long flags;
+
+ if (!port->txts_enabled || ines_txts_onestep(port, skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (port->tx_skb)
+ old_skb = port->tx_skb;
+
+ port->tx_skb = skb;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (old_skb)
+ kfree_skb(old_skb);
+
+ schedule_delayed_work(&port->ts_work, 1);
+}
+
+static void ines_txtstamp_work(struct work_struct *work)
+{
+ struct ines_port *port =
+ container_of(work, struct ines_port, ts_work.work);
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&port->lock, flags);
+ skb = port->tx_skb;
+ port->tx_skb = NULL;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ns = ines_find_txts(port, skb);
+ if (!ns) {
+ kfree_skb(skb);
+ return;
+ }
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_complete_tx_timestamp(skb, &ssh);
+}
+
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
+{
+ u8 *data = skb->data, *msgtype;
+ unsigned int offset = 0;
+
+ if (type & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return false;
+ }
+
+ if (type & PTP_CLASS_V1)
+ offset += OFF_PTP_CONTROL;
+
+ if (skb->len < offset + 1)
+ return false;
+
+ msgtype = data + offset;
+
+ switch ((*msgtype & 0xf)) {
+ case SYNC:
+ case PDELAY_RESP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 tag_to_msgtype(u8 tag)
+{
+ switch (tag) {
+ case MESSAGE_TYPE_SYNC:
+ return SYNC;
+ case MESSAGE_TYPE_P_DELAY_REQ:
+ return PDELAY_REQ;
+ case MESSAGE_TYPE_P_DELAY_RESP:
+ return PDELAY_RESP;
+ case MESSAGE_TYPE_DELAY_REQ:
+ return DELAY_REQ;
+ }
+ return 0xf;
+}
+
+static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
+ unsigned int index)
+{
+ struct device_node *node = device->of_node;
+ struct ines_port *port;
+
+ if (index > INES_N_PORTS - 1) {
+ dev_err(device, "bad port index %u\n", index);
+ return ERR_PTR(-EINVAL);
+ }
+ port = ines_find_port(node, index);
+ if (!port) {
+ dev_err(device, "missing port index %u\n", index);
+ return ERR_PTR(-ENODEV);
+ }
+ port->mii_ts.rxtstamp = ines_rxtstamp;
+ port->mii_ts.txtstamp = ines_txtstamp;
+ port->mii_ts.hwtstamp = ines_hwtstamp;
+ port->mii_ts.link_state = ines_link_state;
+ port->mii_ts.ts_info = ines_ts_info;
+
+ return &port->mii_ts;
+}
+
+static void ines_ptp_release_channel(struct device *device,
+ struct mii_timestamper *mii_ts)
+{
+}
+
+static struct mii_timestamping_ctrl ines_ctrl = {
+ .probe_channel = ines_ptp_probe_channel,
+ .release_channel = ines_ptp_release_channel,
+};
+
+static int ines_ptp_ctrl_probe(struct platform_device *pld)
+{
+ struct ines_clock *clock;
+ struct resource *res;
+ void __iomem *addr;
+ int err = 0;
+
+ res = platform_get_resource(pld, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pld->dev, "missing memory resource\n");
+ return -EINVAL;
+ }
+ addr = devm_ioremap_resource(&pld->dev, res);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto out;
+ }
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (ines_clock_init(clock, &pld->dev, addr)) {
+ kfree(clock);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = register_mii_tstamp_controller(&pld->dev, &ines_ctrl);
+ if (err) {
+ kfree(clock);
+ goto out;
+ }
+ mutex_lock(&ines_clocks_lock);
+ list_add_tail(&ines_clocks, &clock->list);
+ mutex_unlock(&ines_clocks_lock);
+
+ dev_set_drvdata(&pld->dev, clock);
+out:
+ return err;
+}
+
+static int ines_ptp_ctrl_remove(struct platform_device *pld)
+{
+ struct ines_clock *clock = dev_get_drvdata(&pld->dev);
+
+ unregister_mii_tstamp_controller(&pld->dev);
+ mutex_lock(&ines_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&ines_clocks_lock);
+ ines_clock_cleanup(clock);
+ kfree(clock);
+ return 0;
+}
+
+static const struct of_device_id ines_ptp_ctrl_of_match[] = {
+ { .compatible = "ines,ptp-ctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ines_ptp_ctrl_of_match);
+
+static struct platform_driver ines_ptp_ctrl_driver = {
+ .probe = ines_ptp_ctrl_probe,
+ .remove = ines_ptp_ctrl_remove,
+ .driver = {
+ .name = "ines_ptp_ctrl",
+ .of_match_table = of_match_ptr(ines_ptp_ctrl_of_match),
+ },
+};
+module_platform_driver(ines_ptp_ctrl_driver);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index a577218d1ab7..b27c46ebfc8f 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -74,14 +74,13 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
}
-static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
- bool update_event)
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
void __iomem *reg_etts_l;
void __iomem *reg_etts_h;
- u32 valid, stat, lo, hi;
+ u32 valid, lo, hi;
switch (index) {
case 0:
@@ -101,6 +100,10 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
event.type = PTP_CLOCK_EXTTS;
event.index = index;
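+ /*
+ * With FIFO support there may be nothing pending: bail out early
+ * here, and below keep draining while the valid bit stays set.
+ */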
+ if (ptp_qoriq->extts_fifo_support)
+ if (!(ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid))
+ return 0;
+
do {
lo = ptp_qoriq->read(reg_etts_l);
hi = ptp_qoriq->read(reg_etts_h);
@@ -111,11 +114,13 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
ptp_clock_event(ptp_qoriq->clock, &event);
}
- stat = ptp_qoriq->read(&regs->ctrl_regs->tmr_stat);
- } while (ptp_qoriq->extts_fifo_support && (stat & valid));
+ if (!ptp_qoriq->extts_fifo_support)
+ break;
+ } while (ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid);
return 0;
}
+EXPORT_SYMBOL_GPL(extts_clean_up);
/*
* Interrupt service routine
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 74eb5af7295f..97bfdd47954f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
This driver can also be built as a module. If so, the module
will be called bd70528-regulator.
+config REGULATOR_BD71828
+ tristate "ROHM BD71828 Power Regulator"
+ depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
+ help
+ This driver supports the voltage regulators on the ROHM BD71828
+ PMIC. This will enable support for the software-controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71828-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.
+config REGULATOR_MP8859
+ tristate "MPS MP8859 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the MP8859 voltage regulator. This driver
+ supports basic operations (get/set voltage) through the regulator
+ interface.
+ Say M here if you want to include support for the regulator as a
+ module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+ tristate "Monolithic MPQ7920 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MPQ7920 PMIC. This will enable support
+ for the four software-controllable buck and five LDO regulators.
+ This driver allows the different power rails of the device to be
+ controlled through the regulator interface.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
This driver provides support for voltage regulators available
on the ARM Ltd's Versatile Express platform.
+config REGULATOR_VQMMC_IPQ4019
+ tristate "IPQ4019 VQMMC SD LDO regulator support"
+ depends on ARCH_QCOM
+ help
+ This driver provides support for the VQMMC LDO I/O
+ voltage regulator of the IPQ4019 SD/eMMC controller.
+
config REGULATOR_WM831X
tristate "Wolfson Microelectronics WM831x PMIC regulators"
depends on MFD_WM831X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2210ba56f9bd..07bc977c52b0 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
+obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
@@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
+obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
new file mode 100644
index 000000000000..b2fa17be4988
--- /dev/null
+++ b/drivers/regulator/bd71828-regulator.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019 ROHM Semiconductors
+// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver
+//
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+ unsigned int val;
+};
+struct bd71828_regulator_data {
+ struct regulator_desc desc;
+ const struct rohm_dvs_config dvs;
+ const struct reg_init *reg_inits;
+ int reg_init_amnt;
+};
+
+static const struct reg_init buck1_inits[] = {
+ /*
+ * DVS Buck voltages can be changed by register values or via GPIO.
+ * Use register accesses by default.
+ */
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK1_CTRL,
+ .val = BD71828_DVS_BUCK1_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck2_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK2_CTRL,
+ .val = BD71828_DVS_BUCK2_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck6_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK6_CTRL,
+ .val = BD71828_DVS_BUCK6_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck7_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK7_CTRL,
+ .val = BD71828_DVS_BUCK7_CTRL_I2C,
+ },
+};
+
+static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
+ REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck4_volts[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck5_volts[] = {
+ REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
+};
+
+static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int val;
+
+ switch (ramp_delay) {
+ case 1 ... 2500:
+ val = 0;
+ break;
+ case 2501 ... 5000:
+ val = 1;
+ break;
+ case 5001 ... 10000:
+ val = 2;
+ break;
+ case 10001 ... 20000:
+ val = 3;
+ break;
+ default:
+ val = 3;
+ dev_err(&rdev->dev,
+ "ramp_delay: %d not supported, setting 20mV/uS",
+ ramp_delay);
+ }
+
+ /*
+ * On the BD71828 the ramp delay level control register is at
+ * offset +2 from the enable register.
+ */
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+ BD71828_MASK_RAMP_DELAY,
+ val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
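+
+/*
+ * Worked example of the shift above (a sketch; the actual
+ * BD71828_MASK_RAMP_DELAY value lives in the MFD header): with a
+ * two-bit field mask of 0xc0, ffs(0xc0) - 1 == 6, so a ramp_delay
+ * request in the 10001..20000 range stores val == 3 as 0xc0 in the
+ * register at enable_reg + 2.
+ */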
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd71828_regulator_data *data;
+
+ data = container_of(desc, struct bd71828_regulator_data, desc);
+
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ uint32_t uv = 0;
+ unsigned int en;
+ struct regmap *regmap = cfg->regmap;
+ static const char * const props[] = { "rohm,dvs-run-voltage",
+ "rohm,dvs-idle-voltage",
+ "rohm,dvs-suspend-voltage",
+ "rohm,dvs-lpsr-voltage" };
+ unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+ BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = of_property_read_u32(np, props[i], &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ continue;
+ }
+ if (uv)
+ en = 0xffffffff;
+ else
+ en = 0;
+
+ ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
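+
+/*
+ * For example (values hypothetical), a node with
+ * "rohm,dvs-run-voltage = <1800000>" sets BD71828_MASK_RUN_EN in the
+ * enable register, while "rohm,dvs-idle-voltage = <0>" clears
+ * BD71828_MASK_IDLE_EN: LDO6 has a fixed voltage, so these properties
+ * only select the states in which the regulator is enabled.
+ */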
+
+static const struct regulator_ops bd71828_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK1,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK1_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK1_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ /*
+ * The LPSR voltage is the same as the SUSPEND voltage. Allow
+ * setting it so that the regulator can be enabled in the
+ * LPSR state.
+ */
+ .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck1_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK2,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK2_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK2_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck2_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck2_inits),
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK3,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts),
+ .n_voltages = BD71828_BUCK3_VOLTS,
+ .enable_reg = BD71828_REG_BUCK3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK3_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK3_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK3 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK3_VOLT,
+ .idle_reg = BD71828_REG_BUCK3_VOLT,
+ .suspend_reg = BD71828_REG_BUCK3_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ .run_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_mask = BD71828_MASK_BUCK3_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK4,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts),
+ .n_voltages = BD71828_BUCK4_VOLTS,
+ .enable_reg = BD71828_REG_BUCK4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK4_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK4_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK4 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK4_VOLT,
+ .idle_reg = BD71828_REG_BUCK4_VOLT,
+ .suspend_reg = BD71828_REG_BUCK4_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ .run_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_mask = BD71828_MASK_BUCK4_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK5,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts),
+ .n_voltages = BD71828_BUCK5_VOLTS,
+ .enable_reg = BD71828_REG_BUCK5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK5_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK5_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK5 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK5_VOLT,
+ .idle_reg = BD71828_REG_BUCK5_VOLT,
+ .suspend_reg = BD71828_REG_BUCK5_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ .run_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_mask = BD71828_MASK_BUCK5_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK6,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK6_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK6_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck6_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck6_inits),
+ },
+ {
+ .desc = {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK7,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK7_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK7_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck7_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck7_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO1,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO1_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO1 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO1_VOLT,
+ .idle_reg = BD71828_REG_LDO1_VOLT,
+ .suspend_reg = BD71828_REG_LDO1_VOLT,
+ .lpsr_reg = BD71828_REG_LDO1_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO2,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO2_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO2 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO2_VOLT,
+ .idle_reg = BD71828_REG_LDO2_VOLT,
+ .suspend_reg = BD71828_REG_LDO2_VOLT,
+ .lpsr_reg = BD71828_REG_LDO2_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO3,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO3_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO3 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+ .idle_reg = BD71828_REG_LDO3_VOLT,
+ .suspend_reg = BD71828_REG_LDO3_VOLT,
+ .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO4,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO4_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO4 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+ .idle_reg = BD71828_REG_LDO4_VOLT,
+ .suspend_reg = BD71828_REG_LDO4_VOLT,
+ .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO5,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO5_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ .owner = THIS_MODULE,
+ },
+ /*
+ * LDO5 is special: its vsel setting can be taken from either
+ * of two different registers, selected by GPIO.
+ *
+ * This driver supports only the configuration where
+ * BD71828_REG_LDO5_VOLT_L is used.
+ */
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO5_VOLT,
+ .idle_reg = BD71828_REG_LDO5_VOLT,
+ .suspend_reg = BD71828_REG_LDO5_VOLT,
+ .lpsr_reg = BD71828_REG_LDO5_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO6,
+ .ops = &bd71828_ldo6_ops,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD71828_LDO_6_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = BD71828_REG_LDO6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .owner = THIS_MODULE,
+ /*
+ * LDO6 only supports enable/disable for all states.
+ * Voltage for LDO6 is fixed.
+ */
+ .of_parse_cb = ldo6_parse_dt,
+ },
+ }, {
+ .desc = {
+ /* SNVS LDO in data-sheet */
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO_SNVS,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO7_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO7 supports only a single voltage for all states. The
+ * voltage can still be enabled individually for each state,
+ * so allow setting all states in order to support enabling
+ * the power rail in different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO7_VOLT,
+ .idle_reg = BD71828_REG_LDO7_VOLT,
+ .suspend_reg = BD71828_REG_LDO7_VOLT,
+ .lpsr_reg = BD71828_REG_LDO7_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ },
+};
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd71828;
+ int i, j, ret;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd71828 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd71828) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd71828->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
+ struct regulator_dev *rdev;
+ const struct bd71828_regulator_data *rd;
+
+ rd = &bd71828_rdata[i];
+ rdev = devm_regulator_register(&pdev->dev,
+ &rd->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ rd->desc.name);
+ return PTR_ERR(rdev);
+ }
+ for (j = 0; j < rd->reg_init_amnt; j++) {
+ ret = regmap_update_bits(bd71828->regmap,
+ rd->reg_inits[j].reg,
+ rd->reg_inits[j].mask,
+ rd->reg_inits[j].val);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator %s init failed\n",
+ rd->desc.name);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd71828_regulator = {
+ .driver = {
+ .name = "bd71828-pmic"
+ },
+ .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-pmic");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 13a43eee2e46..8f9b2d8eaf10 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
},
};
-struct bd718xx_pmic_inits {
- const struct bd718xx_regulator_data *r_datas;
- unsigned int r_amount;
-};
-
static int bd718xx_probe(struct platform_device *pdev)
{
struct bd718xx *mfd;
struct regulator_config config = { 0 };
- struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = {
- [ROHM_CHIP_TYPE_BD71837] = {
- .r_datas = bd71837_regulators,
- .r_amount = ARRAY_SIZE(bd71837_regulators),
- },
- [ROHM_CHIP_TYPE_BD71847] = {
- .r_datas = bd71847_regulators,
- .r_amount = ARRAY_SIZE(bd71847_regulators),
- },
- };
-
int i, j, err;
bool use_snvs;
+ const struct bd718xx_regulator_data *reg_data;
+ unsigned int num_reg_data;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev)
goto err;
}
- if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT ||
- !pmic_regulators[mfd->chip.chip_type].r_datas) {
+ switch (mfd->chip.chip_type) {
+ case ROHM_CHIP_TYPE_BD71837:
+ reg_data = bd71837_regulators;
+ num_reg_data = ARRAY_SIZE(bd71837_regulators);
+ break;
+ case ROHM_CHIP_TYPE_BD71847:
+ reg_data = bd71847_regulators;
+ num_reg_data = ARRAY_SIZE(bd71847_regulators);
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported chip type\n");
err = -EINVAL;
goto err;
@@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+ for (i = 0; i < num_reg_data; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct bd718xx_regulator_data *r;
- r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+ r = &reg_data[i];
desc = &r->desc;
config.dev = pdev->dev.parent;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 03d79fee2987..d015d99cb59d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
out:
return ret;
}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
static int regulator_limit_voltage_step(struct regulator_dev *rdev,
int *current_uV, int *min_uV)
@@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
return ret;
return ret - rdev->constraints->uV_offset;
}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
/**
* regulator_get_voltage - get regulator output voltage
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f9448ed50e05..0cdeb6186529 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, da9210_dt_ids);
-static int da9210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
{
struct da9210 *chip;
struct device *dev = &i2c->dev;
@@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = {
.name = "da9210",
.of_match_table = of_match_ptr(da9210_dt_ids),
},
- .probe = da9210_i2c_probe,
+ .probe_new = da9210_i2c_probe,
.id_table = da9210_i2c_id,
};
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 523dc1b95826..2ea4362ffa5c 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip)
/*
* I2C driver interface functions
*/
-static int da9211_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
{
struct da9211 *chip;
int error, ret;
@@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = {
.name = "da9211",
.of_match_table = of_match_ptr(da9211_dt_ids),
},
- .probe = da9211_i2c_probe,
+ .probe_new = da9211_i2c_probe,
.id_table = da9211_i2c_id,
};
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index ca3dc3f3bb29..bb16c465426e 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -13,6 +13,8 @@
#include <linux/regulator/driver.h>
#include <linux/module.h>
+#include "internal.h"
+
/**
* regulator_is_enabled_regmap - standard is_enabled() for regmap users
*
@@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
consumers[i].supply = supply_names[i];
}
EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
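+ *
+ * Return: true if the two regulators share the same regulator_dev.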
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 978f5e903cae..cfb765986d0d 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static int isl9305_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int isl9305_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct isl9305_pdata *pdata = i2c->dev.platform_data;
@@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = {
.name = "isl9305",
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
- .probe = isl9305_i2c_probe,
+ .probe_new = isl9305_i2c_probe,
.id_table = isl9305_i2c_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index bc96e65ef7c0..8be252f81b09 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971,
return 0;
}
-static int lp3971_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lp3971_i2c_probe(struct i2c_client *i2c)
{
struct lp3971 *lp3971;
struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
@@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = {
.driver = {
.name = "LP3971",
},
- .probe = lp3971_i2c_probe,
+ .probe_new = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index d934540eb8c4..e12e52c69e52 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ltc3676_regulator_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc3676_regulator_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_init_data *init_data = dev_get_platdata(dev);
@@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
- .probe = ltc3676_regulator_probe,
+ .probe_new = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
new file mode 100644
index 000000000000..1d26b506ee5b
--- /dev/null
+++ b/drivers/regulator/mp8859.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 five technologies GmbH
+// Author: Markus Reichl <m.reichl@fivetechno.de>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+
+
+#define VOL_MIN_IDX 0x00
+#define VOL_MAX_IDX 0x7ff
+
+/* Register definitions */
+#define MP8859_VOUT_L_REG 0 /* 3 low bits */
+#define MP8859_VOUT_H_REG 1 /* 8 high bits */
+#define MP8859_VOUT_GO_REG 2
+#define MP8859_IOUT_LIM_REG 3
+#define MP8859_CTL1_REG 4
+#define MP8859_CTL2_REG 5
+#define MP8859_RESERVED1_REG 6
+#define MP8859_RESERVED2_REG 7
+#define MP8859_RESERVED3_REG 8
+#define MP8859_STATUS_REG 9
+#define MP8859_INTERRUPT_REG 0x0A
+#define MP8859_MASK_REG 0x0B
+#define MP8859_ID1_REG 0x0C
+#define MP8859_MFR_ID_REG 0x27
+#define MP8859_DEV_ID_REG 0x28
+#define MP8859_IC_REV_REG 0x29
+
+#define MP8859_MAX_REG 0x29
+
+#define MP8859_GO_BIT 0x01
+
+
+static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
+
+ if (ret)
+ return ret;
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
+
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
+ MP8859_GO_BIT, 1);
+ return ret;
+}
+
+static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
+{
+ unsigned int val_tmp;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val = val_tmp << 3;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val |= val_tmp & 0x07;
+ return val;
+}
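+
+/*
+ * Worked example of the 11-bit selector split above: sel = 421 (0x1a5)
+ * is written as VOUT_L = 421 & 0x7 = 5 and VOUT_H = 421 >> 3 = 52;
+ * reading back gives (52 << 3) | 5 = 421 again. With the 10000 uV step
+ * below, selector 421 corresponds to 4.21 V and the top selector 0x7ff
+ * (2047) to 20.47 V.
+ */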
+
+static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
+};
+
+static const struct regmap_config mp8859_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MP8859_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regulator_ops mp8859_ops = {
+ .set_voltage_sel = mp8859_set_voltage_sel,
+ .get_voltage_sel = mp8859_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+};
+
+static const struct regulator_desc mp8859_regulators[] = {
+ {
+ .id = 0,
+ .type = REGULATOR_VOLTAGE,
+ .name = "mp8859_dcdc",
+ .of_match = of_match_ptr("mp8859_dcdc"),
+ .n_voltages = VOL_MAX_IDX + 1,
+ .linear_ranges = mp8859_dcdc_ranges,
+ .n_linear_ranges = 1,
+ .ops = &mp8859_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int mp8859_i2c_probe(struct i2c_client *i2c)
+{
+ int ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
+ struct regulator_dev *rdev;
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+ rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
+ &config);
+
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ mp8859_regulators[0].name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct of_device_id mp8859_dt_id[] = {
+ {.compatible = "mps,mp8859"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mp8859_dt_id);
+
+static const struct i2c_device_id mp8859_i2c_id[] = {
+ { "mp8859", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
+
+static struct i2c_driver mp8859_regulator_driver = {
+ .driver = {
+ .name = "mp8859",
+ .of_match_table = of_match_ptr(mp8859_dt_id),
+ },
+ .probe_new = mp8859_i2c_probe,
+ .id_table = mp8859_i2c_id,
+};
+
+module_i2c_driver(mp8859_regulator_driver);
+
+MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
+MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
new file mode 100644
index 000000000000..54c862edf571
--- /dev/null
+++ b/drivers/regulator/mpq7920.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mpq7920.c - regulator driver for mps mpq7920
+//
+// Copyright 2019 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include "mpq7920.h"
+
+#define MPQ7920_BUCK_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+#define MPQ7920_LDO_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+
+#define MPQ7920BUCK(_name, _id, _ilim) \
+ [MPQ7920_BUCK ## _id] = { \
+ .id = MPQ7920_BUCK ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .of_parse_cb = mpq7920_parse_cb, \
+ .ops = &mpq7920_buck_ops, \
+ .min_uV = MPQ7920_BUCK_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \
+ .curr_table = _ilim, \
+ .n_current_limits = ARRAY_SIZE(_ilim), \
+ .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .csel_mask = MPQ7920_MASK_BUCK_ILIM, \
+ .enable_reg = MPQ7920_REG_REGULATOR_EN, \
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_BUCK ## _id), \
+ .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .soft_start_mask = MPQ7920_MASK_SOFTSTART, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \
+ [MPQ7920_LDO ## _id] = { \
+ .id = MPQ7920_LDO ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = _ops, \
+ .min_uV = MPQ7920_LDO_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_LDO_VOLT_RANGE, \
+ .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .curr_table = _ilim, \
+ .n_current_limits = _ilim_sz, \
+ .csel_reg = _creg, \
+ .csel_mask = _cmask, \
+ .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_LDO ##_id + 1), \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+enum mpq7920_regulators {
+ MPQ7920_BUCK1,
+ MPQ7920_BUCK2,
+ MPQ7920_BUCK3,
+ MPQ7920_BUCK4,
+ MPQ7920_LDO1, /* LDORTC */
+ MPQ7920_LDO2,
+ MPQ7920_LDO3,
+ MPQ7920_LDO4,
+ MPQ7920_LDO5,
+ MPQ7920_MAX_REGULATORS,
+};
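+
+/*
+ * With the enum above, MPQ7920BUCK("buck1", 1, ...) resolves its enable
+ * bit to BIT(MPQ7920_REGULATOR_EN_OFFSET - MPQ7920_BUCK1) = BIT(7),
+ * buck2 to BIT(6) and so on, while the LDO macro's "+ 1" keeps the
+ * LDO2..LDO5 enable bits contiguous below the bucks (LDORTC is always
+ * on and has no enable bit).
+ */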
+
+struct mpq7920_regulator_info {
+ struct regmap *regmap;
+ struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config mpq7920_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x25,
+};
+
+/*
+ * Current limits array (in uA)
+ * ILIM1 & ILIM3
+ */
+static const unsigned int mpq7920_I_limits1[] = {
+ 4600000, 6600000, 7600000, 9300000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mpq7920_I_limits2[] = {
+ 2700000, 3900000, 5100000, 6100000
+};
+
+/* LDO4 & LDO5 */
+static const unsigned int mpq7920_I_limits3[] = {
+ 300000, 700000
+};
+
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *config);
+
+/* RTCLDO not controllable, always ON */
+static const struct regulator_ops mpq7920_ldortc_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+};
+
+static const struct regulator_ops mpq7920_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_soft_start = regulator_set_soft_start_regmap,
+ .set_ramp_delay = mpq7920_set_ramp_delay,
+};
+
+static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
+ MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
+ MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
+ MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
+ MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
+ MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+ MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00-01: Reserved
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val;
+
+ if (ramp_delay > 8000 || ramp_delay < 0)
+ return -EINVAL;
+
+ if (ramp_delay <= 4000)
+ ramp_val = 3;
+ else
+ ramp_val = 2;
+
+ return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
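+
+/*
+ * For example, a request between 0 and 4000 uV/us selects ramp_val 3
+ * (the 4 mV/us setting) and anything up to 8000 selects ramp_val 2
+ * (8 mV/us); the value lands in bits [7:6] of MPQ7920_REG_CTL0 per
+ * MPQ7920_MASK_DVS_SLEWRATE (0xc0).
+ */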
+
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ uint8_t val;
+ int ret;
+ struct mpq7920_regulator_info *info = config->driver_data;
+ struct regulator_desc *rdesc = &info->rdesc[desc->id];
+
+ if (of_property_read_bool(np, "mps,buck-ovp-disable")) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
+ MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-phase-delay", &val);
+ if (!ret) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
+ MPQ7920_MASK_BUCK_PHASE_DELAY,
+ (val & 3) << 4);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-softstart", &val);
+ if (!ret)
+ rdesc->soft_start_val_on = (val & 3) << 2;
+
+ return 0;
+}
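+
+/*
+ * Example properties (values hypothetical): a bare "mps,buck-ovp-disable;"
+ * clears the OVP bit in the buck's B register, and
+ * "mps,buck-phase-delay = /bits/ 8 <2>;" stores (2 & 3) << 4 in the
+ * buck's C register; the per-buck registers stride by 4.
+ */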
+
+static void mpq7920_parse_dt(struct device *dev,
+ struct mpq7920_regulator_info *info)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ uint8_t freq;
+
+ np = of_get_child_by_name(np, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return;
+ }
+
+ ret = of_property_read_u8(np, "mps,switch-freq", &freq);
+ if (!ret) {
+ regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_SWITCH_FREQ,
+ (freq & 3) << 4);
+ }
+
+ of_node_put(np);
+}
+
+static int mpq7920_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mpq7920_regulator_info *info;
+ struct regulator_config config = { NULL, };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->rdesc = mpq7920_regulators_desc;
+ regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, info);
+ info->regmap = regmap;
+ if (client->dev.of_node)
+ mpq7920_parse_dt(&client->dev, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &mpq7920_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mpq7920_of_match[] = {
+ { .compatible = "mps,mpq7920"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpq7920_of_match);
+
+static const struct i2c_device_id mpq7920_id[] = {
+ { "mpq7920", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7920_id);
+
+static struct i2c_driver mpq7920_regulator_driver = {
+ .driver = {
+ .name = "mpq7920",
+ .of_match_table = of_match_ptr(mpq7920_of_match),
+ },
+ .probe_new = mpq7920_i2c_probe,
+ .id_table = mpq7920_id,
+};
+module_i2c_driver(mpq7920_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h
new file mode 100644
index 000000000000..489924655a96
--- /dev/null
+++ b/drivers/regulator/mpq7920.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * mpq7920.h - Regulator definitions for mpq7920
+ *
+ * Copyright 2019 Monolithic Power Systems, Inc
+ *
+ */
+
+#ifndef __MPQ7920_H__
+#define __MPQ7920_H__
+
+#define MPQ7920_REG_CTL0 0x00
+#define MPQ7920_REG_CTL1 0x01
+#define MPQ7920_REG_CTL2 0x02
+#define MPQ7920_BUCK1_REG_A 0x03
+#define MPQ7920_BUCK1_REG_B 0x04
+#define MPQ7920_BUCK1_REG_C 0x05
+#define MPQ7920_BUCK1_REG_D 0x06
+#define MPQ7920_BUCK2_REG_A 0x07
+#define MPQ7920_BUCK2_REG_B 0x08
+#define MPQ7920_BUCK2_REG_C 0x09
+#define MPQ7920_BUCK2_REG_D 0x0a
+#define MPQ7920_BUCK3_REG_A 0x0b
+#define MPQ7920_BUCK3_REG_B 0x0c
+#define MPQ7920_BUCK3_REG_C 0x0d
+#define MPQ7920_BUCK3_REG_D 0x0e
+#define MPQ7920_BUCK4_REG_A 0x0f
+#define MPQ7920_BUCK4_REG_B 0x10
+#define MPQ7920_BUCK4_REG_C 0x11
+#define MPQ7920_BUCK4_REG_D 0x12
+#define MPQ7920_LDO1_REG_A 0x13
+#define MPQ7920_LDO1_REG_B 0x0
+#define MPQ7920_LDO2_REG_A 0x14
+#define MPQ7920_LDO2_REG_B 0x15
+#define MPQ7920_LDO2_REG_C 0x16
+#define MPQ7920_LDO3_REG_A 0x17
+#define MPQ7920_LDO3_REG_B 0x18
+#define MPQ7920_LDO3_REG_C 0x19
+#define MPQ7920_LDO4_REG_A 0x1a
+#define MPQ7920_LDO4_REG_B 0x1b
+#define MPQ7920_LDO4_REG_C 0x1c
+#define MPQ7920_LDO5_REG_A 0x1d
+#define MPQ7920_LDO5_REG_B 0x1e
+#define MPQ7920_LDO5_REG_C 0x1f
+#define MPQ7920_REG_MODE 0x20
+#define MPQ7920_REG_REGULATOR_EN 0x22
+
+#define MPQ7920_MASK_VREF 0x7f
+#define MPQ7920_MASK_BUCK_ILIM 0xc0
+#define MPQ7920_MASK_LDO_ILIM BIT(6)
+#define MPQ7920_MASK_DISCHARGE BIT(5)
+#define MPQ7920_MASK_MODE 0xc0
+#define MPQ7920_MASK_SOFTSTART 0x0c
+#define MPQ7920_MASK_SWITCH_FREQ 0x30
+#define MPQ7920_MASK_BUCK_PHASE_DELAY 0x30
+#define MPQ7920_MASK_DVS_SLEWRATE 0xc0
+#define MPQ7920_MASK_OVP 0x40
+#define MPQ7920_OVP_DISABLE ~(0x40)
+#define MPQ7920_DISCHARGE_ON BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET 7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN 400000
+#define MPQ7920_LDO_VOLT_MIN 650000
+#define MPQ7920_VOLT_MAX 3587500
+#define MPQ7920_VOLT_STEP 12500
+
+#endif /* __MPQ7920_H__ */
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index af95449d3590..69e6af3cd505 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = {
/*
* I2C driver interface functions
*/
-static int mt6311_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = {
.name = "mt6311",
.of_match_table = of_match_ptr(mt6311_dt_ids),
},
- .probe = mt6311_i2c_probe,
+ .probe_new = mt6311_i2c_probe,
.id_table = mt6311_i2c_id,
};
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 3d3415839ba2..787ced918372 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -279,8 +279,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88060_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88060 *chip;
@@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = {
.name = "pv88060",
.of_match_table = of_match_ptr(pv88060_dt_ids),
},
- .probe = pv88060_i2c_probe,
+ .probe_new = pv88060_i2c_probe,
.id_table = pv88060_i2c_id,
};
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index b1d0d97ae935..784729ec2182 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -272,8 +272,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88090_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88090 *chip;
@@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = {
.name = "pv88090",
.of_match_table = of_match_ptr(pv88090_dt_ids),
},
- .probe = pv88090_i2c_probe,
+ .probe_new = pv88090_i2c_probe,
.id_table = pv88090_i2c_id,
};
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 5b4003226484..31f79fda3238 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
}
if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
continue;
}
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 51f7e8b74d8c..115f59530852 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -390,5 +390,5 @@ module_platform_driver(s2mpa01_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 4f2dc5ebffdc..23d288278957 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 12d6b8d2e97e..4abd3ed31f60 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index bf1a3508ebc4..44e4cecbf6de 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip)
dev_dbg(chip->dev, "Fault log: FLT_POR\n");
}
-static int slg51000_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int slg51000_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct slg51000 *chip;
@@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = {
.driver = {
.name = "slg51000-regulator",
},
- .probe = slg51000_i2c_probe,
+ .probe_new = slg51000_i2c_probe,
.id_table = slg51000_i2c_id,
};
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 42e03b2c10a0..2222e739e62b 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = {
/*
* I2C driver interface functions
*/
-static int sy8106a_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sy8106a_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_dev *rdev;
@@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = {
.name = "sy8106a",
.of_match_table = of_match_ptr(sy8106a_i2c_of_match),
},
- .probe = sy8106a_i2c_probe,
+ .probe_new = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
index 92adb4f3ee19..62d243f3b904 100644
--- a/drivers/regulator/sy8824x.c
+++ b/drivers/regulator/sy8824x.c
@@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = {
.val_bits = 8,
};
-static int sy8824_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sy8824_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
@@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = {
.name = "sy8824-regulator",
.of_match_table = of_match_ptr(sy8824_dt_ids),
},
- .probe = sy8824_i2c_probe,
+ .probe_new = sy8824_i2c_probe,
.id_table = sy8824_id,
};
module_i2c_driver(sy8824_regulator_driver);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 89b9314d64c9..af9abcd9c166 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared interrupt register offsets which are
* write-1-to-clear between domains ensuring exclusivity.
*/
- abb->int_base = devm_ioremap_nocache(dev, res->start,
+ abb->int_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->int_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
@@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared efuse register offsets which are read-only
* between domains
*/
- abb->efuse_base = devm_ioremap_nocache(dev, res->start,
+ abb->efuse_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->efuse_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 7b0e38f8d627..0edc83089ba2 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = {
.wr_table = &tps65132_no_reg_table,
};
-static int tps65132_probe(struct i2c_client *client,
- const struct i2c_device_id *client_id)
+static int tps65132_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
@@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = {
.driver = {
.name = "tps65132",
},
- .probe = tps65132_probe,
+ .probe_new = tps65132_probe,
.id_table = tps65132_id,
};
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 9a9ee8188109..cbadb1c99679 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -11,10 +11,13 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/sort.h>
+#include "internal.h"
+
struct vctrl_voltage_range {
int min_uV;
int max_uV;
@@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
static int vctrl_get_voltage(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
- int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
return vctrl_calc_output_voltage(vctrl, ctrl_uV);
}
@@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
- int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
+ int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
int ret;
if (req_min_uV >= uV || !vctrl->ovp_threshold)
/* voltage rising or no OVP */
- return regulator_set_voltage(
- ctrl_reg,
+ return regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
- vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
+ vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
+ PM_SUSPEND_ON);
while (uV > req_min_uV) {
int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
@@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ next_ctrl_uV,
next_ctrl_uV,
- next_ctrl_uV);
+ PM_SUSPEND_ON);
if (ret)
goto err;
@@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
err:
/* Try to go back to original voltage */
- regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
+ regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+ PM_SUSPEND_ON);
return ret;
}
@@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
/* voltage rising or no OVP */
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[selector].ctrl,
vctrl->vtable[selector].ctrl,
- vctrl->vtable[selector].ctrl);
+ PM_SUSPEND_ON);
if (!ret)
vctrl->sel = selector;
@@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
else
next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl->vtable[next_sel].ctrl,
- vctrl->vtable[next_sel].ctrl);
+ vctrl->vtable[next_sel].ctrl,
+ PM_SUSPEND_ON);
if (ret) {
dev_err(&rdev->dev,
"failed to set control voltage to %duV\n",
@@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
err:
if (vctrl->sel != orig_sel) {
/* Try to go back to original voltage */
- if (!regulator_set_voltage(ctrl_reg,
+ if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[orig_sel].ctrl,
vctrl->vtable[orig_sel].ctrl,
- vctrl->vtable[orig_sel].ctrl))
+ PM_SUSPEND_ON))
vctrl->sel = orig_sel;
else
dev_warn(&rdev->dev,
@@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
if (ctrl_uV < 0) {
dev_err(&pdev->dev, "failed to get control voltage\n");
return ctrl_uV;
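
vctrl is a regulator whose control knob is another regulator, so calling the consumer-level regulator_get_voltage()/regulator_set_voltage() from inside its own ops can deadlock on locks the core already holds. The fix routes everything through the rdev-level coupler API, which is why the file now pulls in regulator/coupler.h and the driver-internal "internal.h" (needed to reach ctrl_reg->rdev). The extra argument names the power state being configured; PM_SUSPEND_ON is the running system. A sketch of the calling convention, assuming ctrl_rdev and the microvolt bounds are in scope:

	int ctrl_uV, ret;

	ctrl_uV = regulator_get_voltage_rdev(ctrl_rdev);
	if (ctrl_uV < 0)
		return ctrl_uV;

	/* min/max in microvolts, plus the state being configured */
	ret = regulator_set_voltage_rdev(ctrl_rdev, min_uV, max_uV,
					 PM_SUSPEND_ON);
	if (ret < 0)
		return ret;
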
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
new file mode 100644
index 000000000000..6d5ae25d08d1
--- /dev/null
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com>
+// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr>
+//
+// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int ipq4019_vmmc_voltages[] = {
+ 1500000, 1800000, 2500000, 3000000,
+};
+
+static const struct regulator_ops ipq4019_regulator_voltage_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc vmmc_regulator = {
+ .name = "vmmcq",
+ .ops = &ipq4019_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .volt_table = ipq4019_vmmc_voltages,
+ .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages),
+ .vsel_reg = 0,
+ .vsel_mask = 0x3,
+};
+
+static const struct regmap_config ipq4019_vmmcq_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int ipq4019_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct regulator_dev *rdev;
+ struct resource *res;
+ struct regmap *rmap;
+ void __iomem *base;
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &vmmc_regulator);
+ if (!init_data)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.of_node = dev->of_node;
+ cfg.regmap = rmap;
+
+ rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator: %ld\n",
+ PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static const struct of_device_id regulator_ipq4019_of_match[] = {
+ { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ {},
+};
+
+static struct platform_driver ipq4019_regulator_driver = {
+ .probe = ipq4019_regulator_probe,
+ .driver = {
+ .name = "vqmmc-ipq4019-regulator",
+ .of_match_table = of_match_ptr(regulator_ipq4019_of_match),
+ },
+};
+module_platform_driver(ipq4019_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>");
+MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator");
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 579b3ff5c644..feb1f8e52c00 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc->res))
return -EBUSY;
- rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
+ rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1915061932e..5256e3ce84e5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 433b7b64368d..bb35ba4a8d24 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index dad2be333d82..37c3bdc3642d 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
+ aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
+EXPORT_SYMBOL(ap_queue_init_state);
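
Taken together, these hunks move state-machine startup out of the bus code: ap_device_probe() no longer resets the queue, queues are created in AP_STATE_UNBOUND, and the renamed, now-exported ap_queue_init_state() is called by whichever zcrypt driver binds the queue (see the cex2a/cex2c/cex4 hunks below). A hedged sketch of the resulting probe shape; "foo" is not a real zcrypt module and FOO_CLEANUP_TIME is assumed:

	static int foo_queue_probe(struct ap_device *ap_dev)
	{
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* zcrypt queue allocation and ops selection elided */

		ap_queue_init_state(aq);	/* leave AP_STATE_UNBOUND, start reset */
		aq->request_timeout = FOO_CLEANUP_TIME;
		return 0;
	}
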
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index c1db64a2db21..110fe9d0cb91 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
/* do some plausibility checks on the key block */
- if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
- prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
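
For reference, with sizeof(uint16_t) == 2 this tightens the accepted prepparm->kb.len window from 120 + 10 = 130 ... 136 + 10 = 146 bytes down to 120 + 6 = 126 ... 136 + 6 = 142 bytes; presumably the key block in this reply carries three trailing 16-bit length fields rather than five, so the old lower bound rejected valid short replies.
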
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index c50f3e86cc74..7cbb384ec535 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME,
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 35c7c6672713..c78c0d119806 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 442e3d6162f7..6fabc906114c 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 871d44746f5c..9575a627a1e1 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -125,12 +125,6 @@ struct qeth_routing_info {
enum qeth_routing_types type;
};
-/* IPA stuff */
-struct qeth_ipa_info {
- __u32 supported_funcs;
- __u32 enabled_funcs;
-};
-
/* SETBRIDGEPORT stuff */
enum qeth_sbp_roles {
QETH_SBP_ROLE_NONE = 0,
@@ -169,41 +163,6 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
-static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_setadp_cmd func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & ipa->enabled_funcs & func);
-}
-
-#define qeth_adp_supported(c, f) \
- qeth_is_adp_supported(&c->options.adp, f)
-#define qeth_is_supported(c, f) \
- qeth_is_ipa_supported(&c->options.ipa4, f)
-#define qeth_is_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa4, f)
-#define qeth_is_supported6(c, f) \
- qeth_is_ipa_supported(&c->options.ipa6, f)
-#define qeth_is_enabled6(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa6, f)
-#define qeth_is_ipafunc_supported(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_supported6(c, f) : qeth_is_supported(c, f))
-#define qeth_is_ipafunc_enabled(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
@@ -262,7 +221,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-#define QETH_RX_PULL_LEN 256
struct qeth_hdr_layer3 {
__u8 id;
@@ -600,7 +558,6 @@ enum qeth_channel_states {
*/
enum qeth_card_states {
CARD_STATE_DOWN,
- CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
};
@@ -650,6 +607,8 @@ struct qeth_cmd_buffer {
long timeout;
unsigned char *data;
void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply);
void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned int data_length);
int rc;
@@ -660,6 +619,14 @@ static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
refcount_inc(&iob->ref_count);
}
+static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
+{
+ if (!IS_IPA(iob->data))
+ return NULL;
+
+ return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+}
+
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -735,11 +702,11 @@ enum qeth_discipline_id {
};
struct qeth_card_options {
+ struct qeth_ipa_caps ipa4;
+ struct qeth_ipa_caps ipa6;
struct qeth_routing_info route4;
- struct qeth_ipa_info ipa4;
- struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
- struct qeth_ipa_info ipa6;
+ struct qeth_ipa_caps adp; /* Adapter parameters */
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
@@ -769,12 +736,10 @@ struct qeth_osn_info {
struct qeth_discipline {
const struct device_type *devtype;
- int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
- int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online) (struct ccwgroup_device *);
- int (*set_offline) (struct ccwgroup_device *);
+ int (*set_online)(struct qeth_card *card);
+ void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -862,6 +827,13 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
return card->state == CARD_STATE_SOFTSETUP;
}
+static inline void qeth_unlock_channel(struct qeth_card *card,
+ struct qeth_channel *channel)
+{
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
@@ -957,18 +929,6 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
-static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
- u8 flags)
-{
- if ((card->dev->features & NETIF_F_RXCSUM) &&
- (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- QETH_CARD_STAT_INC(card, rx_skb_csum);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
-}
-
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1035,14 +995,11 @@ struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_do_run_thread(struct qeth_card *, unsigned long);
-void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
-void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
int qeth_stop_channel(struct qeth_channel *channel);
+int qeth_set_offline(struct qeth_card *card, bool resetting);
void qeth_print_status_message(struct qeth_card *);
-int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1065,9 +1022,6 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
- struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
- struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
@@ -1076,9 +1030,11 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
-void qeth_tx_timeout(struct net_device *);
+void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length);
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
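
Note also the ndo_tx_timeout prototype change threaded through here: the core now tells the handler which TX queue stalled. qeth does not appear to use the new argument in these hunks, but the declaration must match. A minimal sketch of the new signature with a hypothetical handler:

	/* .ndo_tx_timeout now receives the stalled queue index: */
	static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
	{
		netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
		/* kick device-specific recovery here */
	}
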
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29facb913671..9639938581f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -247,9 +247,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
QETH_CARD_TEXT(card, 2, "realcbp");
- if (card->state != CARD_STATE_DOWN)
- return -EPERM;
-
/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
qeth_clear_working_pool_list(card);
qeth_free_buffer_pool(card);
@@ -520,11 +517,10 @@ static int __qeth_issue_next_read(struct qeth_card *card)
} else {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
- atomic_set(&channel->irq_pending, 0);
+ qeth_unlock_channel(card, channel);
qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
- wake_up(&card->wait_q);
}
return rc;
}
@@ -749,8 +745,8 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
goto out;
}
- if (IS_IPA(iob->data)) {
- cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+ cmd = __ipa_reply(iob);
+ if (cmd) {
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
@@ -759,17 +755,12 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
card->osn_info.assist_cb(card->dev, cmd);
goto out;
}
- } else {
- /* non-IPA commands should only flow during initialization */
- if (card->state != CARD_STATE_DOWN)
- goto out;
}
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
- if (!IS_IPA(tmp->data) ||
- __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
+ if (tmp->match && tmp->match(tmp, iob)) {
request = tmp;
/* take the object outside the lock */
qeth_get_cmd(request);
@@ -824,7 +815,8 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
return 0;
}
-void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_start_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -833,9 +825,9 @@ void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
-void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_running_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -844,7 +836,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
@@ -865,7 +856,7 @@ static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
int rc = 0;
@@ -873,7 +864,6 @@ int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
(rc = __qeth_do_run_thread(card, thread)) >= 0);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
@@ -881,7 +871,6 @@ void qeth_schedule_recovery(struct qeth_card *card)
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
-EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
struct irb *irb)
@@ -972,8 +961,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
- if (!card)
- return;
QETH_CARD_TEXT(card, 5, "irq");
@@ -1003,24 +990,25 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
channel->active_cmd = NULL;
+ qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
qeth_cancel_cmd(iob, rc);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
return;
}
- atomic_set(&channel->irq_pending, 0);
-
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
+ wake_up(&card->wait_q);
+ }
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
+ wake_up(&card->wait_q);
+ }
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
@@ -1054,7 +1042,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
- goto out;
+ return;
}
}
@@ -1062,16 +1050,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
- goto out;
+ return;
}
if (iob->callback)
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
}
-
-out:
- wake_up(&card->wait_q);
- return;
}
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
@@ -1198,31 +1182,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
}
}
-static void qeth_clean_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "freech");
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = NULL;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
-static void qeth_setup_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "setupch");
-
- channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = qeth_irq;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1318,6 +1277,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
+static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
struct task_struct *ts;
@@ -1329,8 +1289,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
card->write.state != CH_STATE_UP)
return;
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
- ts = kthread_run(card->discipline->recover, (void *)card,
- "qeth_recover");
+ ts = kthread_run(qeth_do_reset, card, "qeth_recover");
if (IS_ERR(ts)) {
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
@@ -1395,9 +1354,6 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
- qeth_setup_channel(&card->read);
- qeth_setup_channel(&card->write);
- qeth_setup_channel(&card->data);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -1467,12 +1423,38 @@ int qeth_stop_channel(struct qeth_channel *channel)
channel->active_cmd);
channel->active_cmd = NULL;
}
+ cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);
+static int qeth_start_channel(struct qeth_channel *channel)
+{
+ struct ccw_device *cdev = channel->ccwdev;
+ int rc;
+
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = qeth_irq;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ rc = ccw_device_set_online(cdev);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = NULL;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ return rc;
+}
+
static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
@@ -1698,6 +1680,13 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
iob->callback = qeth_release_buffer_cb;
}
+static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ /* MPC cmds are issued strictly in sequence. */
+ return !IS_IPA(reply->data);
+}
+
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
void *data,
unsigned int data_length)
@@ -1712,6 +1701,7 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->data);
iob->finalize = qeth_mpc_finalize_cmd;
+ iob->match = qeth_mpc_match_reply;
return iob;
}
@@ -1784,8 +1774,7 @@ static int qeth_send_control_data(struct qeth_card *card,
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_put_cmd(iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
+ qeth_unlock_channel(card, channel);
goto out;
}
@@ -2632,7 +2621,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
buf->rx_skb = netdev_alloc_skb(card->dev,
- QETH_RX_PULL_LEN + ETH_HLEN);
+ ETH_HLEN +
+ sizeof(struct ipv6hdr));
if (!buf->rx_skb)
return 1;
}
@@ -2673,7 +2663,7 @@ static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
-int qeth_init_qdio_queues(struct qeth_card *card)
+static int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
int rc;
@@ -2721,7 +2711,6 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
@@ -2733,7 +2722,9 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length)
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply))
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -2741,6 +2732,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
+ iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2753,6 +2745,14 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
+
+ return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
+}
+
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
@@ -2768,7 +2768,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length);
+ qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -2867,7 +2867,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
- card->options.adp.supported_funcs =
+ card->options.adp.supported =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return 0;
}
@@ -2923,8 +2923,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_CARD_TEXT(card, 2, "ipaunsup");
- card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
- card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
default:
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
@@ -2932,13 +2932,11 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return -EIO;
}
- if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
- card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
- card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- } else
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ card->options.ipa4 = cmd->hdr.assists;
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ card->options.ipa6 = cmd->hdr.assists;
+ else
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
CARD_DEVID(card));
return 0;
@@ -3106,7 +3104,6 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
}
return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
-EXPORT_SYMBOL_GPL(qeth_hw_trap);
static int qeth_check_qdio_errors(struct qeth_card *card,
struct qdio_buffer *buf,
@@ -3409,7 +3406,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev->flags & IFF_UP)
- napi_schedule(&card->napi);
+ napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -4316,7 +4313,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
return rc;
}
-void qeth_tx_timeout(struct net_device *dev)
+void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qeth_card *card;
@@ -4697,7 +4694,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "detcapab");
if (!ddev->online) {
ddev_offline = 1;
- rc = ccw_device_set_online(ddev);
+ rc = qeth_start_channel(channel);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
@@ -4872,9 +4869,6 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
- qeth_clean_channel(&card->read);
- qeth_clean_channel(&card->write);
- qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
unregister_service_level(&card->qeth_service_level);
@@ -4937,13 +4931,14 @@ retry:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
- rc = ccw_device_set_online(CARD_RDEV(card));
+
+ rc = qeth_start_channel(&card->read);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_WDEV(card));
+ rc = qeth_start_channel(&card->write);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_DDEV(card));
+ rc = qeth_start_channel(&card->data);
if (rc)
goto retriable;
retriable:
@@ -5004,9 +4999,9 @@ retriable:
*carrier_ok = true;
}
- card->options.ipa4.supported_funcs = 0;
- card->options.ipa6.supported_funcs = 0;
- card->options.adp.supported_funcs = 0;
+ card->options.ipa4.supported = 0;
+ card->options.ipa6.supported = 0;
+ card->options.adp.supported = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
@@ -5038,6 +5033,12 @@ retriable:
if (rc)
goto out;
+ rc = qeth_init_qdio_queues(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "9err%d", rc);
+ goto out;
+ }
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5048,6 +5049,204 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
+static int qeth_set_online(struct qeth_card *card)
+{
+ int rc;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 2, "setonlin");
+
+ rc = card->discipline->set_online(card);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
+}
+
+int qeth_set_offline(struct qeth_card *card, bool resetting)
+{
+ int rc, rc2, rc3;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 3, "setoffl");
+
+ if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
+ qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+ card->info.hwtrap = 1;
+ }
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ card->discipline->set_offline(card);
+
+ rc = qeth_stop_channel(&card->data);
+ rc2 = qeth_stop_channel(&card->write);
+ rc3 = qeth_stop_channel(&card->read);
+ if (!rc)
+ rc = (rc2) ? rc2 : rc3;
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
+
+ /* let user_space know that device is offline */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_set_offline);
+
+static int qeth_do_reset(void *data)
+{
+ struct qeth_card *card = data;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "recover1");
+ if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+ return 0;
+ QETH_CARD_TEXT(card, 2, "recover2");
+ dev_warn(&card->gdev->dev,
+ "A recovery process has been started for the device\n");
+
+ qeth_set_offline(card, true);
+ rc = qeth_set_online(card);
+ if (!rc) {
+ dev_info(&card->gdev->dev,
+ "Device successfully recovered!\n");
+ } else {
+ ccwgroup_set_offline(card->gdev);
+ dev_warn(&card->gdev->dev,
+ "The qeth device driver failed to recover an error on the device\n");
+ }
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_QETH_L3)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr)
+{
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
+ struct net_device *dev = skb->dev;
+
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+ "FAKELL", skb->len);
+ return;
+ }
+
+ if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
+ u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+ ETH_P_IP;
+ unsigned char tg_addr[ETH_ALEN];
+
+ skb_reset_network_header(skb);
+ switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
+ case QETH_CAST_MULTICAST:
+ if (prot == ETH_P_IP)
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+ else
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ case QETH_CAST_BROADCAST:
+ ether_addr_copy(tg_addr, dev->broadcast);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ default:
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ ether_addr_copy(tg_addr, dev->dev_addr);
+ }
+
+ if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+ dev_hard_header(skb, dev, prot, tg_addr,
+ &l3_hdr->next_hop.rx.src_mac, skb->len);
+ else
+ dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
+ skb->len);
+ }
+
+ /* copy VLAN tag from hdr into skb */
+ if (!card->options.sniffer &&
+ (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+ QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+ u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+ l3_hdr->vlan_id :
+ l3_hdr->next_hop.rx.vlan_id;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+ }
+}
+#endif
+
+static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr, bool uses_frags)
+{
+ struct napi_struct *napi = &card->napi;
+ bool is_cso;
+
+ switch (hdr->hdr.l2.id) {
+ case QETH_HEADER_TYPE_OSN:
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+
+ card->osn_info.data_cb(skb);
+ return;
+#if IS_ENABLED(CONFIG_QETH_L3)
+ case QETH_HEADER_TYPE_LAYER3:
+ qeth_l3_rebuild_skb(card, skb, hdr);
+ is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+#endif
+ case QETH_HEADER_TYPE_LAYER2:
+ is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+ default:
+ /* never happens */
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ if (skb_is_nonlinear(skb)) {
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
+ skb_shinfo(skb)->nr_frags);
+ }
+
+ if (uses_frags) {
+ napi_gro_frags(napi);
+ } else {
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ napi_gro_receive(napi, skb);
+ }
+}
+
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
struct page *page = virt_to_page(data);
@@ -5064,17 +5263,20 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
- struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element **__element, int *__offset,
- struct qeth_hdr **hdr)
+static int qeth_extract_skb(struct qeth_card *card,
+ struct qeth_qdio_buffer *qethbuffer,
+ struct qdio_buffer_element **__element,
+ int *__offset)
{
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
+ struct napi_struct *napi = &card->napi;
unsigned int linear_len = 0;
+ bool uses_frags = false;
int offset = *__offset;
bool use_rx_sg = false;
unsigned int headroom;
+ struct qeth_hdr *hdr;
struct sk_buff *skb;
int skb_len = 0;
@@ -5082,42 +5284,42 @@ next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
- return NULL;
+ return -ENODATA;
element++;
offset = 0;
}
- *hdr = element->addr + offset;
- offset += sizeof(struct qeth_hdr);
+ hdr = element->addr + offset;
+ offset += sizeof(*hdr);
skb = NULL;
- switch ((*hdr)->hdr.l2.id) {
+ switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
- skb_len = (*hdr)->hdr.l2.pkt_length;
+ skb_len = hdr->hdr.l2.pkt_length;
linear_len = ETH_HLEN;
headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
- skb_len = (*hdr)->hdr.l3.length;
+ skb_len = hdr->hdr.l3.length;
if (!IS_LAYER3(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+ if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
linear_len = ETH_HLEN;
headroom = 0;
break;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
+ if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
linear_len = sizeof(struct ipv6hdr);
else
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
- skb_len = (*hdr)->hdr.osn.pdu_length;
+ skb_len = hdr->hdr.osn.pdu_length;
if (!IS_OSN(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
@@ -5127,13 +5329,13 @@ next_packet:
headroom = sizeof(struct qeth_hdr);
break;
default:
- if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
else
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
/* Can't determine packet length, drop the whole buffer. */
- return NULL;
+ return -EPROTONOSUPPORT;
}
if (skb_len < linear_len) {
@@ -5146,21 +5348,43 @@ next_packet:
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
- if (use_rx_sg && qethbuffer->rx_skb) {
+ if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
- skb = qethbuffer->rx_skb;
- qethbuffer->rx_skb = NULL;
- } else {
- if (!use_rx_sg)
- linear_len = skb_len;
- skb = napi_alloc_skb(&card->napi, linear_len + headroom);
+ if (qethbuffer->rx_skb &&
+ skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
+ skb = qethbuffer->rx_skb;
+ qethbuffer->rx_skb = NULL;
+ goto use_skb;
+ }
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ /* -ENOMEM, no point in falling back further. */
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ goto walk_packet;
+ }
+
+ if (skb_tailroom(skb) >= linear_len + headroom) {
+ uses_frags = true;
+ goto use_skb;
+ }
+
+ netdev_info_once(card->dev,
+ "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
+ linear_len + headroom, skb_tailroom(skb));
+ /* Shouldn't happen. Don't optimize, fall back to linear skb. */
}
- if (!skb)
+ linear_len = skb_len;
+ skb = napi_alloc_skb(napi, linear_len + headroom);
+ if (!skb) {
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
- else if (headroom)
- skb_reserve(skb, headroom);
+ goto walk_packet;
+ }
+use_skb:
+ if (headroom)
+ skb_reserve(skb, headroom);
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
@@ -5193,11 +5417,14 @@ walk_packet:
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
if (skb) {
- dev_kfree_skb_any(skb);
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
QETH_CARD_STAT_INC(card,
rx_length_errors);
}
- return NULL;
+ return -EMSGSIZE;
}
element++;
offset = 0;
@@ -5210,22 +5437,40 @@ walk_packet:
*__element = element;
*__offset = offset;
- if (use_rx_sg) {
- QETH_CARD_STAT_INC(card, rx_sg_skbs);
- QETH_CARD_STAT_ADD(card, rx_sg_frags,
- skb_shinfo(skb)->nr_frags);
+
+ qeth_receive_skb(card, skb, hdr, uses_frags);
+ return 0;
+}
+
+static int qeth_extract_skbs(struct qeth_card *card, int budget,
+ struct qeth_qdio_buffer *buf, bool *done)
+{
+ int work_done = 0;
+
+ WARN_ON_ONCE(!budget);
+ *done = false;
+
+ while (budget) {
+ if (qeth_extract_skb(card, buf, &card->rx.b_element,
+ &card->rx.e_offset)) {
+ *done = true;
+ break;
+ }
+
+ work_done++;
+ budget--;
}
- return skb;
+
+ return work_done;
}
-EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
int qeth_poll(struct napi_struct *napi, int budget)
{
struct qeth_card *card = container_of(napi, struct qeth_card, napi);
int work_done = 0;
struct qeth_qdio_buffer *buffer;
- int done;
int new_budget = budget;
+ bool done;
while (1) {
if (!card->rx.b_count) {
@@ -5248,11 +5493,10 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (!(card->rx.qdio_err &&
qeth_check_qdio_errors(card, buffer->buffer,
card->rx.qdio_err, "qinerr")))
- work_done +=
- card->discipline->process_rx_buffer(
- card, new_budget, &done);
+ work_done += qeth_extract_skbs(card, new_budget,
+ buffer, &done);
else
- done = 1;
+ done = true;
if (done) {
QETH_CARD_STAT_INC(card, rx_bufs);
@@ -5421,9 +5665,9 @@ int qeth_setassparms_cb(struct qeth_card *card,
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa4.enabled = cmd->hdr.assists.enabled;
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa6.enabled = cmd->hdr.assists.enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5824,7 +6068,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
goto err;
}
}
- rc = card->discipline->set_online(gdev);
+
+ rc = qeth_set_online(card);
err:
return rc;
}
@@ -5832,7 +6077,8 @@ err:
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- return card->discipline->set_offline(gdev);
+
+ return qeth_set_offline(card, false);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
@@ -5855,7 +6101,7 @@ static int qeth_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- card->discipline->set_offline(gdev);
+ qeth_set_offline(card, false);
return 0;
}
@@ -5864,7 +6110,7 @@ static int qeth_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- rc = card->discipline->set_online(gdev);
+ rc = qeth_set_online(card);
qeth_set_allowed_threads(card, 0xffffffff, 0);
if (rc)
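
The command/reply rework above replaces the old "every waiter is an IPA command, compare seqnos" rule with a per-command match() callback: qeth_prepare_ipa_cmd() stores the callback on the buffer, qeth_mpc_alloc_cmd() installs qeth_mpc_match_reply(), and the read-side dispatcher simply asks each waiter tmp->match(tmp, iob). A hedged sketch of a custom matcher built on the same hooks (the "foo" names are hypothetical):

	/* hypothetical matcher: claim any IPA reply, regardless of seqno */
	static bool foo_match_any_ipa(struct qeth_cmd_buffer *iob,
				      struct qeth_cmd_buffer *reply)
	{
		return __ipa_reply(reply) != NULL;
	}

	/* installed when the command is built: */
	qeth_prepare_ipa_cmd(card, iob, data_length, foo_match_any_ipa);
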
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 458db34239a7..3865f7258449 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -53,6 +53,16 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
return (caps->enabled & mask) == mask;
}
+#define qeth_adp_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+ ((prot == QETH_PROT_IPV6) ? qeth_is_supported6(c, f) : \
+ qeth_is_supported(c, f))
+
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -338,14 +348,14 @@ enum qeth_card_info_port_speed {
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
- __u8 ip_addr[4];
- __u8 mask[4];
+ __be32 addr;
+ __be32 mask;
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelip6 {
- __u8 ip_addr[16];
- __u8 mask[16];
+ struct in6_addr addr;
+ struct in6_addr prefix;
__u32 flags;
} __attribute__ ((packed));
@@ -766,8 +776,7 @@ struct qeth_ipacmd_hdr {
__u8 prim_version_no;
__u8 param_count;
__u16 prot_version;
- __u32 ipa_supported;
- __u32 ipa_enabled;
+ struct qeth_ipa_caps assists;
} __attribute__ ((packed));
/* The IPA command itself */
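
With qeth_ipa_info folded into the generic supported/enabled pair of struct qeth_ipa_caps, the convenience macros move into this header and all reduce to qeth_ipa_caps_supported(); call sites keep their old spelling. A usage sketch, assuming a card pointer and the existing IPA_OUTBOUND_CHECKSUM assist bit:

	/* unchanged caller idiom, e.g. when setting up offloads: */
	if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		card->dev->hw_features |= NETIF_F_IP_CSUM;
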
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 7bd86027f559..2bd9993aa60b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -24,8 +24,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
switch (card->state) {
case CARD_STATE_DOWN:
return sprintf(buf, "DOWN\n");
- case CARD_STATE_HARDSETUP:
- return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
return sprintf(buf, "UP (LAN %s)\n",
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index ddc615b431a8..adf25c9fd2b3 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -13,7 +13,6 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role,
enum qeth_sbp_states *state);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 47d37e75dda6..692bd2623401 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -24,7 +24,6 @@
#include "qeth_core.h"
#include "qeth_l2.h"
-static int qeth_l2_set_offline(struct ccwgroup_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -284,59 +283,17 @@ static void qeth_l2_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->info.promisc_mode = 0;
}
-static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
- unsigned int len;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- skb->protocol = eth_type_trans(skb, skb->dev);
- qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- } else {
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- }
-
- work_done++;
- budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
- }
- return work_done;
-}
-
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
@@ -649,7 +606,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l2_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -767,17 +724,31 @@ static void qeth_l2_trace_features(struct qeth_card *card)
sizeof(card->options.vnicc.sup_chars));
}
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (!card->options.sbp.reflect_promisc &&
+ card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+ /* Conditional to avoid spurious error messages */
+ qeth_bridgeport_setrole(card, card->options.sbp.role);
+ /* Let the callback function refresh the stored role value. */
+ qeth_bridgeport_query_ports(card, &card->options.sbp.role,
+ NULL);
+ }
+ if (card->options.sbp.hostnotification) {
+ if (qeth_bridgeport_an_set(card, 1))
+ card->options.sbp.hostnotification = 0;
+ } else {
+ qeth_bridgeport_an_set(card, 0);
+ }
+}
+
+static int qeth_l2_set_online(struct qeth_card *card)
+{
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -787,9 +758,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs)
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
+ "The device represents a Bridge Capable Port\n");
+ }
mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -800,20 +773,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
qeth_l2_trace_features(card);
- qeth_l2_setup_bridgeport_attrs(card);
-
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -840,8 +804,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
@@ -850,81 +812,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l2_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l2_stop_card(card);
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l2_set_offline(cgdev, 0);
-}
-
-static int qeth_l2_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l2_set_offline(card->gdev, 1);
- rc = qeth_l2_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
static int __init qeth_l2_init(void)
@@ -961,8 +854,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l2_discipline = {
.devtype = &qeth_l2_devtype,
- .process_rx_buffer = qeth_l2_process_inbound_buffer,
- .recover = qeth_l2_recover,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
@@ -1001,7 +892,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!iob)
return -ENOMEM;
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
+ qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
+
memcpy(__ipa_cmd(iob), data, data_len);
iob->callback = qeth_osn_assist_cb;
return qeth_send_ipa_cmd(card, iob, NULL, NULL);
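/*
 * Editor's sketch (not part of this patch): the locking, channel shutdown
 * and offline uevent removed from __qeth_l2_set_offline() above presumably
 * move into a common core helper that wraps the discipline hook. A minimal
 * reconstruction from the removed lines follows; everything beyond the
 * qeth_set_offline()/set_offline names is an assumption.
 */
static int qeth_set_offline_sketch(struct qeth_card *card, bool resetting)
{
	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	card->discipline->set_offline(card);	/* L2/L3-specific teardown */

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	/* let user space know that the device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}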
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 7fa325cf6f8d..86bcae992f72 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -246,40 +246,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
-/**
- * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
- * @card: qeth_card structure pointer
- *
- * Note: this function is called with conf_mutex held by the caller
- */
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
-{
- int rc;
-
- if (!card)
- return;
- if (!card->options.sbp.supported_funcs)
- return;
-
- mutex_lock(&card->sbp_lock);
- if (!card->options.sbp.reflect_promisc &&
- card->options.sbp.role != QETH_SBP_ROLE_NONE) {
- /* Conditional to avoid spurious error messages */
- qeth_bridgeport_setrole(card, card->options.sbp.role);
- /* Let the callback function refresh the stored role value. */
- qeth_bridgeport_query_ports(card,
- &card->options.sbp.role, NULL);
- }
- if (card->options.sbp.hostnotification) {
- rc = qeth_bridgeport_an_set(card, 1);
- if (rc)
- card->options.sbp.hostnotification = 0;
- } else {
- qeth_bridgeport_an_set(card, 0);
- }
- mutex_unlock(&card->sbp_lock);
-}
-
/* VNIC CHARS support */
/* convert sysfs attr name to VNIC characteristic */
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 5db04fe472c0..6ccfe2121095 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -23,7 +23,6 @@ struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
u8 is_multicast:1;
- u8 in_progress:1;
u8 disp_flag:2;
u8 ipato:1; /* ucast only */
@@ -35,7 +34,7 @@ struct qeth_ipaddr {
union {
struct {
__be32 addr;
- unsigned int mask;
+ __be32 mask;
} a4;
struct {
struct in6_addr addr;
@@ -102,7 +101,8 @@ struct qeth_ipato_entry {
extern const struct attribute_group *qeth_l3_attr_groups[];
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5508ab89b518..317d56647a4a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -37,30 +37,18 @@
#include "qeth_l3.h"
-
-static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
-static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI4", addr);
-}
-
-static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI6", addr);
-}
-
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
- char *buf)
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf)
{
if (proto == QETH_PROT_IPV4)
- qeth_l3_ipaddr4_to_string(addr, buf);
- else if (proto == QETH_PROT_IPV6)
- qeth_l3_ipaddr6_to_string(addr, buf);
+ return sprintf(buf, "%pI4", addr);
+ else
+ return sprintf(buf, "%pI6", addr);
}
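/*
 * Editor's usage sketch (illustrative, not part of the patch): the helper
 * now returns the sprintf() length, so callers can advance their output
 * buffer directly. The example value is hypothetical.
 */
char addr_str[40];
__be32 addr4 = htonl(0x01020304);	/* 1.2.3.4 */
int len;

len = qeth_l3_ipaddr_to_string(QETH_PROT_IPV4, (u8 *)&addr4, addr_str);
/* addr_str now holds "1.2.3.4" and len == 7 */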
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
@@ -161,8 +149,6 @@ static int qeth_l3_delete_ip(struct qeth_card *card,
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
- if (addr->in_progress)
- return -EINPROGRESS;
if (qeth_card_hw_is_reachable(card))
rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -223,29 +209,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return 0;
}
- /* qeth_l3_register_addr_entry can go to sleep
- * if we add a IPV4 addr. It is caused by the reason
- * that SETIP ipa cmd starts ARP staff for IPV4 addr.
- * Thus we should unlock spinlock, and make a protection
- * using in_progress variable to indicate that there is
- * an hardware operation with this IPV4 address
- */
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1) {
- qeth_l3_deregister_addr_entry(card, addr);
- hash_del(&addr->hnode);
- kfree(addr);
- }
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -313,19 +280,10 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1)
- qeth_l3_delete_ip(card, addr);
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -379,17 +337,16 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
-static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
{
- int i, j;
- for (i = 0; i < 16; i++) {
- j = (len) - (i * 8);
- if (j >= 8)
- netmask[i] = 0xff;
- else if (j > 0)
- netmask[i] = (u8)(0xFF00 >> j);
- else
- netmask[i] = 0;
+ unsigned int i = 0;
+
+ while (len && i < 4) {
+ int mask_len = min_t(int, len, 32);
+
+ prefix->s6_addr32[i] = inet_make_mask(mask_len);
+ len -= mask_len;
+ i++;
}
}
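/*
 * Editor's sketch of the helper's output (not part of the patch): for a
 * /40 prefix, inet_make_mask() fills the first 32-bit word completely and
 * sets the top 8 bits of the second word, all in network byte order.
 */
struct in6_addr prefix = {};

qeth_l3_set_ipv6_prefix(&prefix, 40);
/* prefix.s6_addr32[0] == htonl(0xffffffff)	(bits  0..31)  */
/* prefix.s6_addr32[1] == htonl(0xff000000)	(bits 32..39)  */
/* prefix.s6_addr32[2] == 0 and prefix.s6_addr32[3] == 0       */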
@@ -412,7 +369,6 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- __u8 netmask[16];
u32 flags;
QETH_CARD_TEXT(card, 4, "setdelip");
@@ -427,15 +383,13 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
if (addr->proto == QETH_PROT_IPV6) {
- memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
- memcpy(cmd->data.setdelip6.mask, netmask,
- sizeof(struct in6_addr));
+ cmd->data.setdelip6.addr = addr->u.a6.addr;
+ qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
+ addr->u.a6.pfxlen);
cmd->data.setdelip6.flags = flags;
} else {
- memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
- memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+ cmd->data.setdelip4.addr = addr->u.a4.addr;
+ cmd->data.setdelip4.mask = addr->u.a4.mask;
cmd->data.setdelip4.flags = flags;
}
@@ -581,6 +535,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
@@ -600,6 +555,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -613,6 +569,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
@@ -629,6 +586,8 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
+
return rc;
}
@@ -637,6 +596,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
+ int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -644,7 +604,11 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- return qeth_l3_modify_ip(card, &addr, add);
+ mutex_lock(&card->conf_mutex);
+ rc = qeth_l3_modify_ip(card, &addr, add);
+ mutex_unlock(&card->conf_mutex);
+
+ return rc;
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -937,7 +901,7 @@ out:
return rc;
}
-static int qeth_l3_start_ipassists(struct qeth_card *card)
+static void qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
@@ -947,7 +911,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
- return 0;
}
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
@@ -1198,96 +1161,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr)
-{
- struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
- struct net_device *dev = skb->dev;
-
- if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
- dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
- "FAKELL", skb->len);
- return;
- }
-
- if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
- u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
- ETH_P_IP;
- unsigned char tg_addr[ETH_ALEN];
-
- skb_reset_network_header(skb);
- switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
- case QETH_CAST_MULTICAST:
- if (prot == ETH_P_IP)
- ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
- else
- ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- case QETH_CAST_BROADCAST:
- ether_addr_copy(tg_addr, card->dev->broadcast);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- default:
- if (card->options.sniffer)
- skb->pkt_type = PACKET_OTHERHOST;
- ether_addr_copy(tg_addr, card->dev->dev_addr);
- }
-
- if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
- skb->len);
- else
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, "FAKELL", skb->len);
- }
-
- /* copy VLAN tag from hdr into skb */
- if (!card->options.sniffer &&
- (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
- QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
- u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
- hdr->hdr.l3.vlan_id :
- hdr->hdr.l3.next_hop.rx.vlan_id;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
- }
-
- qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
-}
-
-static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
- qeth_l3_rebuild_skb(card, skb, hdr);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
-
- napi_gro_receive(&card->napi, skb);
- work_done++;
- budget--;
- }
- return work_done;
-}
-
static void qeth_l3_stop_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "stopcard");
@@ -1304,15 +1177,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
card->info.promisc_mode = 0;
}
@@ -2168,7 +2038,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l3_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -2180,17 +2050,13 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+static int qeth_l3_set_online(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -2198,7 +2064,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
@@ -2208,11 +2073,8 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
if (rc)
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
- rc = qeth_l3_start_ipassists(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "3err%d", rc);
- goto out_remove;
- }
+ qeth_l3_start_ipassists(card);
+
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
@@ -2221,12 +2083,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -2255,8 +2111,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
qeth_l3_stop_card(card);
@@ -2264,88 +2118,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l3_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l3_stop_card(card);
- if (card->options.cq == QETH_CQ_ENABLED) {
- rtnl_lock();
- call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
- rtnl_unlock();
- }
-
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l3_set_offline(cgdev, 0);
-}
-
-static int qeth_l3_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- QETH_CARD_HEX(card, 2, &card, sizeof(void *));
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l3_set_offline(card->gdev, 1);
- rc = qeth_l3_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
/* Returns zero if the command is successfully "consumed" */
@@ -2357,8 +2135,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
- .process_rx_buffer = qeth_l3_process_inbound_buffer,
- .recover = qeth_l3_recover,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
@@ -2437,7 +2213,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
addr.u.a4.addr = ifa->ifa_address;
- addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
+ addr.u.a4.mask = ifa->ifa_mask;
return qeth_l3_handle_ip_event(card, &addr, event);
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index e8c848f72c6d..29f2517d2a31 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -402,30 +402,35 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- char addr_str[40];
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int str_len = 0;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- /* add strlen for "/<mask>\n" */
- entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+ char addr_str[40];
+ int entry_len;
+
if (ipatoe->proto != proto)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Append /%mask to the entry: */
+ entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
+ /* Enough room to format %entry\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i,
- "%s/%i\n", addr_str, ipatoe->mask_bits);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len,
+ "%s/%i\n", addr_str, ipatoe->mask_bits);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
- return i;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
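/*
 * Editor's sketch of the sysfs-show pattern adopted above (generic, not
 * from the patch): check that the next entry still fits, then advance by
 * what scnprintf() actually wrote. fill_page() is a hypothetical name.
 */
static ssize_t fill_page(char *buf, const char *const *items, int n)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < n; i++) {
		int written;

		/* room for the entry plus '\n' and the terminating NUL? */
		if (strlen(items[i]) + 2 > PAGE_SIZE - len)
			break;

		written = scnprintf(buf, PAGE_SIZE - len, "%s\n", items[i]);
		len += written;
		buf += written;
	}
	return len;
}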
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -471,16 +476,14 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- goto out;
+ return rc;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
- if (!ipatoe) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!ipatoe)
+ return -ENOMEM;
+
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
@@ -488,8 +491,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
rc = qeth_l3_add_ipato_entry(card, ipatoe);
if (rc)
kfree(ipatoe);
-out:
- mutex_unlock(&card->conf_mutex);
+
return rc ? rc : count;
}
@@ -512,11 +514,9 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -623,31 +623,34 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *ipaddr;
- char addr_str[40];
int str_len = 0;
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
+ char addr_str[40];
+ int entry_len;
+
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - str_len) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Enough room to format %addr\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
- addr_str);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
- addr_str);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return str_len;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -658,63 +661,34 @@ static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
QETH_IP_TYPE_VIPA);
}
-static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
- u8 *addr)
-{
- if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
- return -EINVAL;
- }
- return 0;
-}
-
-static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
qeth_l3_dev_vipa_add4_show,
qeth_l3_dev_vipa_add4_store);
-static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
@@ -731,9 +705,7 @@ static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
@@ -743,9 +715,7 @@ static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
@@ -798,54 +768,34 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
return 0;
}
-static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
qeth_l3_dev_rxip_add4_show,
qeth_l3_dev_rxip_add4_store);
-static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
@@ -862,9 +812,7 @@ static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
@@ -874,9 +822,7 @@ static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 90cf4691b8c3..a7881f8eb05e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8466aa784ec1..8b891a05d9e7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
if (!request_mem_region(start, 0x1000, "aic79xx"))
error = ENOMEM;
if (!error) {
- *maddr = ioremap_nocache(base_page, base_offset + 512);
+ *maddr = ioremap(base_page, base_offset + 512);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 717d8d1082ce..9b293b1f0b71 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
if (!request_mem_region(start, 0x1000, "aic7xxx"))
error = ENOMEM;
if (error == 0) {
- *maddr = ioremap_nocache(start, 256);
+ *maddr = ioremap(start, 256);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index db687ef8a99e..40dc8eac0e3a 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
break;
}
case ACB_ADAPTER_TYPE_C:{
- acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+ acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!acb->pmuC) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0760d0bd8a10..9b81cfbbc5c5 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
u8 __iomem *addr;
int pcicfg_reg;
- addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+ addr = ioremap(pci_resource_start(pcidev, 2),
pci_resource_len(pcidev, 2));
if (addr == NULL)
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+ addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
@@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
else
pcicfg_reg = 0;
- addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+ addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
pci_resource_len(pcidev, pcicfg_reg));
if (addr == NULL)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f069e09beb10..6f8335ddb1f2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
- tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ tgt->ctx_base = ioremap(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 12666313b937..e53ebc5eff85 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
if (!ep->qp.ctx_base)
return -ENOMEM;
goto arm_cq;
@@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+ ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 2e8a3ac575cb..8dea7d53788a 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
goto err_free_hw;
/* Get the start address of registers from BAR 0 */
- hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ hw->regstart = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!hw->regstart) {
csio_err(hw, "Could not map BAR 0, regstart = %p\n",
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8ef150dfb6f7..b60795893994 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1f55b9e4e74a..1b88a3b53eee 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
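/*
 * Editor's note (assumed intent, not stated in the patch): the command
 * words are now zero-initialized before being passed to vnic_dev_cmd(),
 * and the array form lets the returned MAC bytes be copied out of one
 * contiguous object instead of casting a lone u64:
 */
u64 a[2] = {};			/* both command words start at zero */
u8 mac[ETH_ALEN];
int i;

/* after vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait) succeeds: */
for (i = 0; i < ETH_ALEN; i++)
	mac[i] = ((u8 *)&a)[i];	/* all bytes come from the same object */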
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 216e557f703e..1a4ddfacb458 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap_nocache(page_base,
+ void __iomem *page_remapped = ioremap(page_base,
page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
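/*
 * Editor's note: the ioremap_nocache() -> ioremap() hunks throughout this
 * series are a mechanical rename; ioremap_nocache() was an alias for
 * ioremap(), which already returns an uncached MMIO mapping on these
 * platforms, so behavior is unchanged. Generic usage sketch below;
 * REG_OFFSET is a hypothetical register offset.
 */
void __iomem *regs;

regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
if (!regs)
	return -ENOMEM;

writel(0x1, regs + REG_OFFSET);	/* REG_OFFSET: placeholder */
iounmap(regs);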
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index abac2f350aee..c48a73a0f517 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
if (dev->id.sversion == LASI_700_SVERSION) {
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index f6ac819e6e96..8443f2f35be2 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter)
goto out_free_raid_dev;
}
- raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+ raid_dev->baseaddr = ioremap(raid_dev->baseport, 128);
if (!raid_dev->baseaddr) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a4bc81479284..c60cd9fc4240 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5875,7 +5875,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
base_addr = pci_resource_start(instance->pdev, instance->bar);
- instance->reg_set = ioremap_nocache(base_addr, 8192);
+ instance->reg_set = ioremap(base_addr, 8192);
if (!instance->reg_set) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 539ac8ce4fcd..d4bd31a75b9d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
spin_lock_init(&cb->queue_lock);
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
if (cb->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index eb0dd566330a..5c5666491c2e 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
/* Map the Controller Register Window. */
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
if (cs->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 93616f9fd6d7..d79ce97a04bd 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
goto next_entry;
data->MmioAddress = (unsigned long)
- ioremap_nocache(p_dev->resource[2]->start,
+ ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
data->MmioLength = resource_size(p_dev->resource[2]);
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 605b59c76c90..a3a44d4ace1e 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->cregbase =
- ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
if (!ha->cregbase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->iobase =
- ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cea625906440..902b649fc8ef 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
@@ -2956,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
q->limits.zoned = BLK_ZONED_HM;
} else {
sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
+ if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
/* Host-aware */
q->limits.zoned = BLK_ZONED_HA;
- else
+ } else {
/*
- * Treat drive-managed devices as
- * regular block devices.
+ * Treat drive-managed devices and host-aware devices
+ * with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ }
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
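/*
 * Editor's summary of the zoned-model selection above (same logic as the
 * code, restated for clarity):
 *   - device reports host-managed          -> BLK_ZONED_HM
 *   - host-aware and no partitions         -> BLK_ZONED_HA
 *   - host-aware with partitions, or
 *     drive-managed                        -> BLK_ZONED_NONE
 */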
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 412ac56ecd60..b7492568e02f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto disable_device;
}
- ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+ ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
sizeof(struct pqi_ctrl_registers));
if (!ctrl_info->iomem_base) {
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a85d52b5dc32..f8397978f8ab 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f8faf8b3d965..fb41636519ee 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device,
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
/*
+ * For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide)
+ host->nr_hw_queues = num_present_cpus();
/*
* Set the error handler work queue.
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 440a73eae647..f37df79e37e1 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unlink;
- esp->regs = ioremap_nocache(res->start, 0x20);
+ esp->regs = ioremap(res->start, 0x20);
if (!esp->regs)
goto fail_unmap_regs;
@@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unmap_regs;
- esp->dma_regs = ioremap_nocache(res->start, 0x10);
+ esp->dma_regs = ioremap(res->start, 0x10);
esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 77bce208210e..7eac76cccc4c 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev)
struct gsc_irq gsc_irq;
u32 zalon_vers;
int error = -ENODEV;
- void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *zalon = ioremap(dev->hpa.start, 4096);
void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
static int unit = 0;
struct Scsi_Host *host;
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index a23a8e5794f5..bdd82e497d5f 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap_nocache(board,
+ zep->board_base = ioremap(board,
FASTLANE_ESP_ADDR-1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
@@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
esp->ops = zdd->esp_ops;
if (ioaddr > 0xffffff)
- esp->regs = ioremap_nocache(ioaddr, 0x20);
+ esp->regs = ioremap(ioaddr, 0x20);
else
/* ZorroII address space remapped nocache by early startup */
esp->regs = ZTWO_VADDR(ioaddr);
@@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* Only Fastlane Z3 for now - add switch for correct struct
* dma_registers size if adding any more
*/
- esp->dma_regs = ioremap_nocache(dmaaddr,
+ esp->dma_regs = ioremap(dmaaddr,
sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 9475353f49d6..d996782a7106 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk)
if (!mapping->base && mapping->phys) {
kref_init(&mapping->ref);
- mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+ mapping->base = ioremap(mapping->phys, mapping->len);
if (unlikely(!mapping->base))
return -ENXIO;
} else if (mapping->base) {
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8485e812d9b2..f8e070d67fa3 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc)
WARN_ON(resource_type(res) != IORESOURCE_MEM);
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
- d->window[k].virt = ioremap_nocache(res->start,
+ d->window[k].virt = ioremap(res->start,
resource_size(res));
if (!d->window[k].virt)
goto err2;
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 87d69e7471f9..f9f043a3d90a 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr)
if (unlikely(uimask))
return -EBUSY;
- uimask = ioremap_nocache(addr, SZ_4K);
+ uimask = ioremap(addr, SZ_4K);
if (unlikely(!uimask))
return -ENOMEM;
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5823f5b67d16..3f0261d53ad9 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
struct meson_ee_pwrc *pwrc,
struct meson_ee_pwrc_domain *dom)
{
+ int ret;
+
dom->pwrc = pwrc;
dom->num_rstc = dom->desc.reset_names_count;
dom->num_clks = dom->desc.clk_names_count;
@@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
* prepare/enable counters won't be in sync.
*/
if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
- int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+ ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
- pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
- } else
- pm_genpd_init(&dom->base, NULL,
- (dom->desc.get_power ?
- dom->desc.get_power(dom) : true));
+ ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_genpd_init(&dom->base, NULL,
+ (dom->desc.get_power ?
+ dom->desc.get_power(dom) : true));
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.domains[i] = &dom->base;
}
- of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
- return 0;
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index eb96a3086d6d..5db919d96aba 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void)
return 0;
}
- tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
if (!tegra_flowctrl_base)
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 4d719d4b8d5a..606abbe55bba 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -408,7 +408,7 @@ static int __init tegra_init_fuse(void)
}
}
- fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ fuse->base = ioremap(regs.start, resource_size(&regs));
if (!fuse->base) {
pr_err("failed to map FUSE registers\n");
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index df76778af601..a2fd6ccd48f9 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -159,11 +159,11 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+ apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
if (!apbmisc_base)
pr_err("failed to map APBMISC registers\n");
- strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
+ strapping_base = ioremap(straps.start, resource_size(&straps));
if (!strapping_base)
pr_err("failed to map strapping options registers\n");
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index ea0e11a09c12..1699dda6b393 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
of_address_to_resource(np, index, &regs);
- wake = ioremap_nocache(regs.start, resource_size(&regs));
+ wake = ioremap(regs.start, resource_size(&regs));
if (!wake) {
dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
@@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void)
}
}
- pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ pmc->base = ioremap(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
of_node_put(np);
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index cf545f428d03..4486e055794c 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+ If unsure, say N.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 788b5cd1e180..bec827937a5f 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 000000000000..5fb2ee2ac978
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
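/*
 * Editor's sketch (not part of the patch): given the 0x1000 stride, the
 * per-ring realtime register block is presumably addressed like this; the
 * rt_base parameter name is an assumption.
 */
static struct k3_ring_rt_regs __iomem *
k3_ring_rt(void __iomem *rt_base, u32 ring_id)
{
	return rt_base + ring_id * K3_RINGACC_RT_REGS_STEP;
}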
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @occ: Ring occupancy
+ * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer to the parent struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA ring reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of rings descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies, 0);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ ring->size,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ mode,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = readl(&ring->rt->occ);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total
+ * number of writes
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
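
(To see why the doorbell loop above lands exactly on zero: the 21-bit UDMAP
occupancy counter counts modulo 2**21, and for any starting occupancy occ,

	(occ + (2**22 - occ)) mod 2**21 = 2**22 mod 2**21 = 0,

so ringing the doorbell 2**22 - occ times always wraps the counter to 0.)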
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ u32 ring_idx;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_idx = ring->ring_id;
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring_idx,
+ lower_32_bits(ring->ring_mem_dma),
+ upper_32_bits(ring->ring_mem_dma),
+ ring->size,
+ ring->mode,
+ ring->elm_size,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring_idx);
+
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << cfg->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In the case of a shared ring, only the first user (the master user)
+ * can configure the ring. The sequence used by the client should be:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
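
A minimal client-side sketch of the shared-ring sequence documented in
k3_ringacc_ring_cfg() above; ring_id, the cfg values and the error handling
here are illustrative assumptions, not part of this patch:

	struct k3_ring_cfg cfg = {
		.size = 128,				/* hypothetical element count */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = K3_RINGACC_RING_SHARED,	/* mark the ring shareable */
	};
	struct k3_ring *ring;

	/* First (master) user requests and configures the ring. */
	ring = k3_ringacc_request_ring(ringacc, ring_id, 0);
	if (!ring || k3_ringacc_ring_cfg(ring, &cfg))
		return -EINVAL;

	/*
	 * Later users attach to the already-configured ring; the flags
	 * argument of the request only matters for K3_RINGACC_RING_USE_PROXY.
	 */
	ring = k3_ringacc_request_ring(ringacc, ring_id, 0);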
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->free)
+ ring->free = ring->size - readl(&ring->rt->occ);
+
+ return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+ ring->occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+ ring->windex, ring->occ, ring->rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+ ring->windex = (ring->windex + 1) % ring->size;
+ ring->free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->free, ring->windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->rindex = (ring->rindex + 1) % ring->size;
+ ring->occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->occ, ring->rindex, elem_ptr);
+ return 0;
+}
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+ ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->free, ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
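
A hedged sketch of the push/pop wrappers above for a ring configured with
8-byte elements; tx_ring, rx_ring, desc_dma_addr and the element layout are
assumptions for illustration:

	u64 elem = desc_dma_addr;	/* e.g. a descriptor DMA address */
	int ret;

	ret = k3_ringacc_ring_push(tx_ring, &elem);
	if (ret == -ENOMEM)
		return ret;	/* ring full, see k3_ringacc_ring_get_free() */

	ret = k3_ringacc_ring_pop(rx_ring, &elem);
	if (ret == -ENODATA)
		return ret;	/* ring empty, see k3_ringacc_ring_get_occ() */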
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
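
Clients would typically resolve their RA instance from a phandle before
requesting rings; a short sketch (the "ti,ringacc" property name is an
assumption here):

	struct k3_ringacc *ringacc;

	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ringacc))
		/* -EPROBE_DEFER until the RA instance has probed */
		return PTR_ERR(ringacc);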
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->dma_ring_reset_quirk =
+ of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+ ringacc->proxy_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_proxies),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+ dev_set_drvdata(dev, ringacc);
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", },
+ {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 378369d9364a..e9ece45d7a33 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
do_exit(0);
}
@@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
- m3_ipc_state = m3_ipc;
-
return 0;
err_put_rproc:
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c0272135..a3aa40996f13 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->vcu_slcr_ba) {
dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
@@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->logicore_reg_ba) {
dev_err(&pdev->dev, "logicore register mapping failed.\n");
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 99dc61021211..0371d3d5501a 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -529,17 +529,24 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}
-static int intel_config_stream(struct sdw_intel *sdw,
+static int intel_params_stream(struct sdw_intel *sdw,
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
- struct snd_pcm_hw_params *hw_params, int link_id)
+ struct snd_pcm_hw_params *hw_params,
+ int link_id, int alh_stream_id)
{
struct sdw_intel_link_res *res = sdw->res;
+ struct sdw_intel_stream_params_data params_data;
- if (res->ops && res->ops->config_stream && res->arg)
- return res->ops->config_stream(res->arg,
- substream, dai, hw_params, link_id);
+ params_data.substream = substream;
+ params_data.dai = dai;
+ params_data.hw_params = hw_params;
+ params_data.link_id = link_id;
+ params_data.alh_stream_id = alh_stream_id;
+ if (res->ops && res->ops->params_stream && res->dev)
+ return res->ops->params_stream(res->dev,
+ &params_data);
return -EIO;
}
@@ -654,7 +661,8 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
/* Inform DSP about PDI stream number */
- ret = intel_config_stream(sdw, substream, dai, params,
+ ret = intel_params_stream(sdw, substream, dai, params,
+ sdw->instance,
pdi->intel_alh_id);
if (ret)
goto error;
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index d923b6262330..38b7c125fb10 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -5,23 +5,26 @@
#define __SDW_INTEL_LOCAL_H
/**
- * struct sdw_intel_link_res - Soundwire link resources
+ * struct sdw_intel_link_res - Soundwire Intel link resource structure,
+ * typically populated by the controller driver.
+ * @pdev: platform_device
+ * @mmio_base: mmio base of SoundWire registers
* @registers: Link IO registers base
* @shim: Audio shim pointer
* @alh: ALH (Audio Link Hub) pointer
* @irq: Interrupt line
* @ops: Shim callback ops
- * @arg: Shim callback ops argument
- *
- * This is set as pdata for each link instance.
+ * @dev: device implementing hw_params and free callbacks
*/
struct sdw_intel_link_res {
+ struct platform_device *pdev;
+ void __iomem *mmio_base; /* not strictly needed, useful for debug */
void __iomem *registers;
void __iomem *shim;
void __iomem *alh;
int irq;
const struct sdw_intel_ops *ops;
- void *arg;
+ struct device *dev;
};
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 2a2b4d8df462..4b769409f6f8 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -27,19 +27,9 @@ static int link_mask;
module_param_named(sdw_link_mask, link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-struct sdw_link_data {
- struct sdw_intel_link_res res;
- struct platform_device *pdev;
-};
-
-struct sdw_intel_ctx {
- int count;
- struct sdw_link_data *links;
-};
-
static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
{
- struct sdw_link_data *link = ctx->links;
+ struct sdw_intel_link_res *link = ctx->links;
int i;
if (!link)
@@ -62,7 +52,7 @@ static struct sdw_intel_ctx
{
struct platform_device_info pdevinfo;
struct platform_device *pdev;
- struct sdw_link_data *link;
+ struct sdw_intel_link_res *link;
struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
int ret, i;
@@ -123,14 +113,13 @@ static struct sdw_intel_ctx
continue;
}
- link->res.irq = res->irq;
- link->res.registers = res->mmio_base + SDW_LINK_BASE
+ link->registers = res->mmio_base + SDW_LINK_BASE
+ (SDW_LINK_SIZE * i);
- link->res.shim = res->mmio_base + SDW_SHIM_BASE;
- link->res.alh = res->mmio_base + SDW_ALH_BASE;
+ link->shim = res->mmio_base + SDW_SHIM_BASE;
+ link->alh = res->mmio_base + SDW_ALH_BASE;
- link->res.ops = res->ops;
- link->res.arg = res->arg;
+ link->ops = res->ops;
+ link->dev = res->dev;
memset(&pdevinfo, 0, sizeof(pdevinfo));
@@ -138,8 +127,6 @@ static struct sdw_intel_ctx
pdevinfo.name = "int-sdw";
pdevinfo.id = i;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
- pdevinfo.data = &link->res;
- pdevinfo.size_data = sizeof(link->res);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
@@ -216,7 +203,6 @@ void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
return sdw_intel_add_controller(res);
}
-EXPORT_SYMBOL(sdw_intel_init);
/**
* sdw_intel_exit() - SoundWire Intel exit
@@ -224,10 +210,8 @@ EXPORT_SYMBOL(sdw_intel_init);
*
* Delete the controller instances created and cleanup
*/
-void sdw_intel_exit(void *arg)
+void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
- struct sdw_intel_ctx *ctx = arg;
-
sdw_intel_cleanup_pdev(ctx);
kfree(ctx);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 870f7797b56b..d6ed0c355954 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
+config SPI_HISI_SFC_V3XX
+ tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MTD_SPI_NOR
+ help
+ This enables support for the HiSilicon v3xx SPI-NOR flash controller
+ found in hi16xx chipsets.
+
config SPI_NXP_FLEXSPI
tristate "NXP Flex SPI controller"
depends on ARCH_LAYERSCAPE || HAS_IOMEM
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bb49c9e6d0a0..9b65ec5afc5e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 56f0ca361deb..013458cabe3c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
- if (err == -EPROBE_DEFER) {
- dev_warn(dev, "no DMA channel available at the moment\n");
- goto error_clear;
- }
- dev_err(dev,
- "DMA TX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- /*
- * No reason to check EPROBE_DEFER here since we have already requested
- * tx channel. If it fails here, it's for another reason.
- */
- master->dma_rx = dma_request_slave_channel(dev, "rx");
-
- if (!master->dma_rx) {
- dev_err(dev,
- "DMA RX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ err = PTR_ERR(master->dma_rx);
+ /*
+ * No reason to check EPROBE_DEFER here since we have already
+ * requested tx channel.
+ */
+ dev_err(dev, "No RX DMA channel, DMA is disabled\n");
goto error;
}
@@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
return 0;
error:
- if (master->dma_rx)
+ if (!IS_ERR(master->dma_rx))
dma_release_channel(master->dma_rx);
if (!IS_ERR(master->dma_tx))
dma_release_channel(master->dma_tx);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 85bad70f59e3..23d295f36c80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
name = qspi_irq_tab[val].irq_name;
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
/* get the l2 interrupts */
- irq = platform_get_irq_byname(pdev, name);
+ irq = platform_get_irq_byname_optional(pdev, name);
} else if (!num_ints && soc_intc) {
/* all mspi, bspi intrs muxed to one L1 intr */
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index fb61a620effc..11c235879bb7 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
+#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
}
}
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
- struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
@@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
- goto err;
+ /* Fall back to interrupt mode */
+ return 0;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx) {
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
goto err;
}
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!ctlr->dma_rx) {
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
goto err_release;
}
@@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
- return;
+ return 0;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
@@ -1005,7 +1010,14 @@ err_config:
err_release:
bcm2835_dma_release(ctlr, bs);
err:
- return;
+ /*
+ * Only report error for deferred probing, otherwise fall back to
+ * interrupt mode
+ */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
@@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
- dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+ else
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_controller_put;
}
@@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ if (err)
+ goto out_clk_disable;
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
- goto out_clk_disable;
+ goto out_dma_release;
}
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
+out_dma_release:
+ bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_controller_put:
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index d84e22dd6f9f..68491a8bf7b5 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ bool custom_cs;
- if (!master || !bitbang->chipselect)
+ if (!master)
+ return -EINVAL;
+ /*
+ * We only need the chipselect callback if we are actually using it.
+ * If we just use GPIO descriptors, it is surplus. If the
+ * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * driver-specific chipselect routine.
+ */
+ custom_cs = (!master->use_gpio_descriptors ||
+ (master->flags & SPI_MASTER_GPIO_SS));
+
+ if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
@@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
+ /*
+ * When using GPIO descriptors, the ->set_cs() callback doesn't even
+ * get called unless SPI_MASTER_GPIO_SS is set.
+ */
+ if (custom_cs)
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 2663bb12d9ce..0d86c37e0aeb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws)
void __iomem *clk_reg;
u32 clk_cdiv;
- clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
+ clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
if (!clk_reg)
return -ENOMEM;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a25da377119..31e3f866d11a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->len = transfer->len;
spin_unlock_irqrestore(&dws->buf_lock, flags);
+ /* Ensure dw->rx and dw->rx_end are visible */
+ smp_mb();
+
spi_enable_chip(dws, 0);
/* Handle per transfer options for bpw and speed */
@@ -469,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
struct spi_controller *master;
int ret;
- BUG_ON(dws == NULL);
+ if (!dws)
+ return -EINVAL;
master = spi_alloc_master(dev, 0);
if (!master)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 8428b69c858b..6ec2dcb8c57a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
if (!dma)
return -ENOMEM;
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "rx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_rx);
return ret;
}
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_err(dev, "tx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_tx);
goto err_tx_channel;
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 2cc0ddb4a988..d0b8cc741a24 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
if (fsl_lpspi_can_dma(controller, spi, t))
- fsl_lpspi->usedma = 1;
+ fsl_lpspi->usedma = true;
else
- fsl_lpspi->usedma = 0;
+ fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
}
@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = is_slave;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
+
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_controller_put;
+ }
+
if (!fsl_lpspi->is_slave) {
for (i = 0; i < controller->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
controller->prepare_message = fsl_lpspi_prepare_message;
}
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->transfer_one = fsl_lpspi_transfer_one;
- controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- controller->dev.of_node = pdev->dev.of_node;
- controller->bus_num = pdev->id;
- controller->slave_abort = fsl_lpspi_slave_abort;
-
init_completion(&fsl_lpspi->xfer_done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
- ret = devm_spi_register_controller(&pdev->dev, controller);
- if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
- goto out_controller_put;
- }
-
return 0;
out_controller_put:
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 79b1558b74b8..e8a499cd1f13 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
op->data.nbytes > q->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index fb4159ad6bf6..3b81772fea0d 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- int irq = 0, type;
- int ret = -ENOMEM;
+ int irq, type;
+ int ret;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
@@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
- if (!pinfo->immr_spi_cs) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!pinfo->immr_spi_cs)
+ return -ENOMEM;
}
#endif
/*
@@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
ret = of_address_to_resource(np, 0, &mem);
if (ret)
- goto err;
+ return ret;
irq = platform_get_irq(ofdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
+ if (irq < 0)
+ return irq;
master = fsl_spi_probe(dev, &mem, irq);
- if (IS_ERR(master)) {
- ret = PTR_ERR(master);
- goto err;
- }
-
- return 0;
-err:
- return ret;
+ return PTR_ERR_OR_ZERO(master);
}
static int of_fsl_spi_remove(struct platform_device *ofdev)
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644
index 000000000000..4cf8fc80a7b7
--- /dev/null
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+ struct device *dev;
+ void __iomem *regbase;
+ int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+ !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+ HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+ HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+ uintptr_t addr = (uintptr_t)op->data.buf.in;
+ int max_byte_count;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ max_byte_count = host->max_cmd_dword * 4;
+
+ if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+ op->data.nbytes = 4 - (addr % 4);
+ else if (op->data.nbytes > max_byte_count)
+ op->data.nbytes = max_byte_count;
+
+ return 0;
+}
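
As a worked example of the clamping above (hypothetical values): a 10-byte
read into a buffer whose address satisfies addr % 4 == 2 is clamped to
4 - 2 = 2 bytes, so the remainder of the transfer starts 32-bit aligned;
aligned operations are then limited only by max_byte_count, i.e.
max_cmd_dword * 4 (256 bytes on version 0x351 parts, 64 bytes otherwise).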
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for
+ * the DATABUF registers - so use __io{read,write}32_copy when possible. For
+ * trailing bytes, copy them byte-by-byte from the DATABUF register, as we
+ * can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any start 32b unaligned data
+ * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+ u8 *to, unsigned int len)
+{
+ void __iomem *from;
+ int i;
+
+ from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)to, 4)) {
+ int words = len / 4;
+
+ __ioread32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val;
+
+ to += words * 4;
+ from += words * 4;
+
+ val = __raw_readl(from);
+
+ for (i = 0; i < len; i++, val >>= 8, to++)
+ *to = (u8)val;
+ }
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+ u32 val = __raw_readl(from);
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ to++, val >>= 8, j++)
+ *to = (u8)val;
+ }
+ }
+}
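
To trace the aligned branch above with concrete numbers: a 6-byte read into a
4-byte-aligned buffer copies one dword via __ioread32_copy(), then issues a
single __raw_readl() and peels off the remaining 2 bytes with the shift loop,
so neither the DATABUF window nor the destination buffer is accessed beyond
its bounds.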
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+ const u8 *from, unsigned int len)
+{
+ void __iomem *to;
+ int i;
+
+ to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)from, 4)) {
+ int words = len / 4;
+
+ __iowrite32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val = 0;
+
+ to += words * 4;
+ from += words * 4;
+
+ for (i = 0; i < len; i++, from++)
+ val |= *from << i * 8;
+ __raw_writel(val, to);
+ }
+
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+ u32 val = 0;
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ from++, j++)
+ val |= *from << j * 8;
+ __raw_writel(val, to);
+ }
+ }
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ int ret, len = op->data.nbytes;
+ u32 config = 0;
+
+ if (op->addr.nbytes)
+ config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+ config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+ config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+ chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+ HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+ writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+ writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+ writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+ return 0;
+}
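
For illustration, a hypothetical 4-byte SPI_MEM_DATA_IN operation with an
address phase, no dummy cycles and chip select 0 assembles
config = ((4 - 1) << 9) | BIT(8) | BIT(7) | BIT(3) | BIT(0) = 0x789; after
the address and opcode registers are written, this value goes to
HISI_SFC_V3XX_CMD_CFG and hisi_sfc_v3xx_wait_cmd_idle() polls the START bit
back to zero.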
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_device *spi = mem->spi;
+ u8 chip_select = spi->chip_select;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_controller *ctlr;
+ u32 version;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ host = spi_controller_get_devdata(ctlr);
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+
+ host->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regbase)) {
+ ret = PTR_ERR(host->regbase);
+ goto err_put_master;
+ }
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+ version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+ switch (version) {
+ case 0x351:
+ host->max_cmd_dword = 64;
+ break;
+ default:
+ host->max_cmd_dword = 16;
+ break;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_put_master;
+
+ dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+ return 0;
+
+err_put_master:
+ spi_master_put(ctlr);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+ {"HISI0341", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+ .driver = {
+ .name = "hisi-sfc-v3xx",
+ .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+ },
+ .probe = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index f4a8f470aecc..8543f5ed1099 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
master->unprepare_message = img_spfi_unprepare;
master->handle_err = img_spfi_handle_err;
- spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
- spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+ if (IS_ERR(spfi->tx_ch)) {
+ ret = PTR_ERR(spfi->tx_ch);
+ spfi->tx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+ if (IS_ERR(spfi->rx_ch)) {
+ ret = PTR_ERR(spfi->rx_ch);
+ spfi->rx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
if (!spfi->tx_ch || !spfi->rx_ch) {
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
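
This hunk converts from dma_request_slave_channel(), which returns NULL on any failure, to dma_request_chan(), which returns an ERR_PTR and therefore lets probe deferral be told apart from "no channel wired up". Several drivers below repeat the same conversion. A condensed sketch of the pattern, using a hypothetical request_optional_dma() helper:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical helper; errors other than -EPROBE_DEFER mean "use PIO". */
static int request_optional_dma(struct device *dev, const char *name,
				struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan)) {
		int ret = PTR_ERR(chan);

		*out = NULL;
		return ret == -EPROBE_DEFER ? ret : 0;
	}

	*out = chan;
	return 0;
}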
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 49f0099db0cb..f4f28a400a96 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
- spi_imx->usedma = 1;
+ spi_imx->usedma = true;
else
- spi_imx->usedma = 0;
+ spi_imx->usedma = false;
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cc49fa41fbab..bba10f030e33 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), pdev->name))
goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ hw->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hw->base)
goto exit_busy;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index f3f10443f9e2..7f5680fe2568 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -19,7 +19,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
-#include <linux/gpio.h>
/*
* The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
static int meson_spicc_setup(struct spi_device *spi)
{
- int ret = 0;
-
if (!spi->controller_state)
spi->controller_state = spi_master_get_devdata(spi->master);
- else if (gpio_is_valid(spi->cs_gpio))
- goto out_gpio;
- else if (spi->cs_gpio == -ENOENT)
- return 0;
-
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request cs gpio\n");
- return ret;
- }
- }
-
-out_gpio:
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- return ret;
+ return 0;
}
static void meson_spicc_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-
spi->controller_state = NULL;
}
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
master->prepare_message = meson_spicc_prepare_message;
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
master->transfer_one = meson_spicc_transfer_one;
+ master->use_gpio_descriptors = true;
/* Setup max rate according to the Meson GX datasheet */
if ((rate >> 2) > SPICC_MAX_FREQ)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 996c1c8a9c71..dce85ee07cd0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (ret)
goto out_master_free;
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(ssp->dev, "Failed to request DMA\n");
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_master_free;
}
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index cb52fd8008d0..d25ee32862e0 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (!chip->flash_region_mapped_ptr) {
chip->flash_region_mapped_ptr =
- devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+ devm_ioremap(fiu->dev, (fiu->res_mem->start +
(fiu->info->max_map_size *
desc->mem->spi->chip_select)),
(u32)desc->info.length);
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index fe624731c74c..87cd0233c60b 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -12,6 +12,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -20,7 +21,7 @@
struct npcm_pspi {
struct completion xfer_done;
- struct regmap *rst_regmap;
+ struct reset_control *reset;
struct spi_master *master;
unsigned int tx_bytes;
unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
#define NPCM_PSPI_DEFAULT_CLK 25000000
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET BIT(22)
-#define NPCM7XX_PSPI2_RESET BIT(23)
-
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
priv->mode = spi->mode;
}
+ /*
+ * If the transfer length is even and the transfer uses 8 bits per
+ * word, promote it to a 16 bits-per-word transfer.
+ */
+ if (priv->bits_per_word == 8 && !(t->len & 0x1))
+ t->bits_per_word = 16;
+
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
static void npcm_pspi_send(struct npcm_pspi *priv)
{
int wsize;
+ u16 val;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
switch (wsize) {
case 1:
- iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ iowrite8(val, NPCM_PSPI_DATA + priv->base);
break;
case 2:
- iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ val = *priv->tx_buf++ | (val << 8);
+ iowrite16(val, NPCM_PSPI_DATA + priv->base);
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- priv->tx_buf += wsize;
}
static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
switch (rsize) {
case 1:
- val = ioread8(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
break;
case 2:
val = ioread16(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = (val >> 8);
+ *priv->rx_buf++ = val & 0xff;
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- *priv->rx_buf = val;
- priv->rx_buf += rsize;
}
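
When an even-length 8 bits-per-word transfer is promoted to 16 bits per word (see the setup hunk above), the send path packs two bytes MSB-first into one 16-bit word and the receive path unpacks them in the same order. An illustrative round trip of that packing (hypothetical helpers, not driver code):

#include <stdint.h>

static uint16_t npcm_pack_be16(const uint8_t *src)
{
	/* First byte on the wire occupies bits 15:8. */
	return (uint16_t)((src[0] << 8) | src[1]);
}

static void npcm_unpack_be16(uint16_t val, uint8_t *dst)
{
	dst[0] = val >> 8;
	dst[1] = val & 0xff;
}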
static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
{
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
- NPCM7XX_PSPI1_RESET << priv->id);
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+ reset_control_assert(priv->reset);
+ udelay(5);
+ reset_control_deassert(priv->reset);
}
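
The open-coded syscon write is replaced with the generic reset controller API, which hides the register offset and per-instance reset bit behind a "resets" binding. A sketch of the usual request-and-pulse sequence under that API (the 5 us settle delay is the value this driver uses; other devices differ):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* put the block into reset */
	udelay(5);			/* give the reset time to latch */
	reset_control_deassert(rst);	/* and release it */
	return 0;
}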
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (num_cs < 0)
return num_cs;
- pdev->id = of_alias_get_id(np, "spi");
- if (pdev->id < 0)
- pdev->id = 0;
-
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
- priv->id = pdev->id;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
goto out_disable_clk;
}
- priv->rst_regmap =
- syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(priv->rst_regmap)) {
- dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
- return PTR_ERR(priv->rst_regmap);
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto out_disable_clk;
}
/* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPHA | SPI_CPOL;
master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
+ master->bus_num = -1;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->transfer_one = npcm_pspi_transfer_one;
master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk;
- pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+ pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
return 0;
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index e2331eb7b47a..9df7c5979c29 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -20,7 +20,6 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
@@ -50,8 +49,6 @@ struct tiny_spi {
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- int gpio_cs_count;
- int *gpio_cs;
};
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
@@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
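
tiny_spi_baud() rounds freq / (2 * hz) up, so the generated clock never exceeds the requested rate, then clamps the divisor to what the baud register can hold. A worked example with illustrative numbers:

/*
 * hw->freq = 50 MHz, hz = 4 MHz, hw->baudwidth = 8:
 *
 *   DIV_ROUND_UP(50000000, 2 * 4000000) = 7
 *   min(7, 1 << 8) - 1                  = 6
 *
 * so the register is written with 6 and the controller clocks SCLK at
 * 50 MHz / (2 * (6 + 1)), roughly 3.57 MHz, at or below the 4 MHz
 * asked for.
 */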
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
- struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
- if (hw->gpio_cs_count > 0) {
- gpio_set_value(hw->gpio_cs[spi->chip_select],
- (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
- }
-}
-
static int tiny_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
- unsigned int i;
u32 val;
if (!np)
return 0;
- hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count > 0) {
- hw->gpio_cs = devm_kcalloc(&pdev->dev,
- hw->gpio_cs_count, sizeof(unsigned int),
- GFP_KERNEL);
- if (!hw->gpio_cs)
- return -ENOMEM;
- }
- for (i = 0; i < hw->gpio_cs_count; i++) {
- hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
- if (hw->gpio_cs[i] < 0)
- return -ENODEV;
- }
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
@@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
- unsigned int i;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
@@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the master state. */
master->bus_num = pdev->id;
- master->num_chipselect = 255;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tiny_spi_setup;
+ master->use_gpio_descriptors = true;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
- hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
@@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
}
/* find platform data */
if (platp) {
- hw->gpio_cs_count = platp->gpio_cs_count;
- hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs) {
- err = -EBUSY;
- goto exit;
- }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
@@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- for (i = 0; i < hw->gpio_cs_count; i++) {
- err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
- if (err)
- goto exit_gpio;
- gpio_direction_output(hw->gpio_cs[i], 1);
- }
- hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
return 0;
-exit_gpio:
- while (i-- > 0)
- gpio_free(hw->gpio_cs[i]);
exit:
spi_master_put(master);
return err;
@@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
- unsigned int i;
spi_bitbang_stop(&hw->bitbang);
- for (i = 0; i < hw->gpio_cs_count; i++)
- gpio_free(hw->gpio_cs[i]);
spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 9071333ebdd8..4c7a71f0fb3e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
return limit;
}
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+ /* On MMP, disabling SSE seems to corrupt the rx fifo */
+ if (drv_data->ssp_type == MMP2_SSP)
+ return;
+
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
+
static int null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
@@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
@@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+ if (drv_data->ssp_type != MMP2_SSP)
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
@@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
@@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
pxa2xx_spi_write(drv_data, SSCR1,
@@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
return 0;
}
@@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* KBL-H */
{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 250fd60e1678..3c4f83bf7084 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -137,7 +137,7 @@ enum qspi_clocks {
struct qcom_qspi {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data clks[QSPI_NUM_CLKS];
+ struct clk_bulk_data *clks;
struct qspi_xfer xfer;
/* Lock to protect xfer and IRQ accessed registers */
spinlock_t lock;
@@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
goto exit_probe_master_put;
}
+ ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+ sizeof(*ctrl->clks), GFP_KERNEL);
+ if (!ctrl->clks) {
+ ret = -ENOMEM;
+ goto exit_probe_master_put;
+ }
+
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
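
The clock array moves from a fixed member inside struct qcom_qspi to a devm allocation so the structure no longer hard-codes QSPI_NUM_CLKS. A sketch of the resulting clk_bulk pattern (count and clock names here are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/slab.h>

static int example_get_clocks(struct device *dev, struct clk_bulk_data **out)
{
	struct clk_bulk_data *clks;

	clks = devm_kcalloc(dev, 2, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	clks[0].id = "core";
	clks[1].id = "iface";
	*out = clks;

	/* One call looks up every clock; freeing is device-managed. */
	return devm_clk_bulk_get(dev, 2, clks);
}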
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7222c7689c3c..85575d45901c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -159,7 +159,7 @@
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
@@ -242,6 +242,7 @@ struct spi_ops {
u16 mode_bits;
u16 flags;
u16 fifo_size;
+ u8 num_hw_ss;
};
/*
@@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
return n;
}
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -620,9 +619,8 @@ no_dma_tx:
dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->ctlr->dev),
- dev_name(&rspi->ctlr->dev));
+ dev_warn_once(&rspi->ctlr->dev,
+ "DMA not available, falling back to PIO\n");
}
return ret;
}
@@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* Configure the slave select signal to assert */

+ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+ : spi->chip_select);
+
/* CMOS output mode and MOSI signal from previous transfer */
rspi->sppcr = 0;
if (spi->mode & SPI_LOOP)
rspi->sppcr |= SPPCR_SPLP;
- set_config_register(rspi, 8);
+ rspi->ops->set_config_register(rspi, 8);
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
@@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
+ .num_hw_ss = 2,
};
static const struct spi_ops rspi_rz_ops = {
@@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
+ .num_hw_ss = 1,
};
static const struct spi_ops qspi_ops = {
@@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
+ .num_hw_ss = 1,
};
#ifdef CONFIG_OF
@@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->mode_bits = ops->mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = rspi->ops->num_hw_ss;
ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
@@ -1314,8 +1321,6 @@ error1:
static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
- { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
- { "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8f134735291f..1c11a00a2c36 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -14,8 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
- unsigned short unused_ss;
bool native_cs_inited;
bool native_cs_high;
bool slave_aborted;
@@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define TMDR1 0x00 /* Transmit Mode Register 1 */
-#define TMDR2 0x04 /* Transmit Mode Register 2 */
-#define TMDR3 0x08 /* Transmit Mode Register 3 */
-#define RMDR1 0x10 /* Receive Mode Register 1 */
-#define RMDR2 0x14 /* Receive Mode Register 2 */
-#define RMDR3 0x18 /* Receive Mode Register 3 */
-#define TSCR 0x20 /* Transmit Clock Select Register */
-#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR 0x28 /* Control Register */
-#define FCTR 0x30 /* FIFO Control Register */
-#define STR 0x40 /* Status Register */
-#define IER 0x44 /* Interrupt Enable Register */
-#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR 0x50 /* Transmit FIFO Data Register */
-#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR 0x60 /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
-#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16 3
-#define SCR_BRDV_DIV_32 4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE BIT(9) /* Transmit Enable */
-#define CTR_RXE BIT(8) /* Receive Enable */
-#define CTR_TXRST BIT(1) /* Transmit Reset */
-#define CTR_RXRST BIT(0) /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
-#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
-#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF BIT(23) /* Frame Transmission End */
-#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF BIT(7) /* Frame Reception End */
-#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE BIT(7) /* Frame Reception End Enable */
-#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
+#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT 2
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i) (((i) - 1) << 8)
+#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2 0
+#define SISCR_BRDV_DIV_4 1
+#define SISCR_BRDV_DIV_8 2
+#define SISCR_BRDV_DIV_16 3
+#define SISCR_BRDV_DIV_32 4
+#define SISCR_BRDV_DIV_1 7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT 20
+#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT 4
+#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
@@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
u32 value)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
@@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
u32 mask = clr | set;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}
@@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
struct sh_msiof_spi_priv *p = data;
/* just disable the interrupt and wake up */
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
complete(&p->done);
return IRQ_HANDLED;
@@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
- u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data |= mask;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}
static const u32 sh_msiof_spi_div_array[] = {
- SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
- SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+ SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+ SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
- /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;
@@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
brps = 32;
}
- scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
- sh_msiof_write(p, TSCR, scr);
+ scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, RSCR, scr);
+ sh_msiof_write(p, SIRSCR, scr);
}
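
The bit rate comes from two cascaded dividers: the BRPS prescaler (1-32) and the BRDV power-of-two divider chosen from sh_msiof_spi_div_array[], giving parent_rate / (BRPS * 2^n). A worked example under that reading (numbers are illustrative):

/*
 * parent_rate = 97.5 MHz, spi_hz = 10 MHz:
 *
 *   div = DIV_ROUND_UP(97500000, 10000000) = 10
 *   factored as BRPS = 5, BRDV = DIV_2     (5 * 2 = 10)
 *
 * which yields 9.75 MHz, at or below the requested rate.
 */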
static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
return val;
}
@@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
- tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+ tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->ctlr)) {
- sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+ sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
- sh_msiof_write(p, TMDR1,
- tmp | MDR1_TRMD | TMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ sh_msiof_write(p, SITMDR1,
+ tmp | SIMDR1_TRMD | SITMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
- sh_msiof_write(p, RMDR1, tmp);
+ sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
- tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+ tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << CTR_TEDG_SHIFT;
- tmp |= edge << CTR_REDG_SHIFT;
- tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
- sh_msiof_write(p, CTR, tmp);
+ tmp |= edge << SICTR_TEDG_SHIFT;
+ tmp |= edge << SICTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+ u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, TMDR2, dr2);
+ sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
if (rx_buf)
- sh_msiof_write(p, RMDR2, dr2);
+ sh_msiof_write(p, SIRMDR2, dr2);
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR,
- sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+ sh_msiof_write(p, SISTR,
+ sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_8[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_16[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_32[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+ sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+ sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+ buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+ put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
static int sh_msiof_spi_setup(struct spi_device *spi)
@@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = MDR1_SYNCMD_MASK;
- set = MDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD_MASK;
+ set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(MDR1_SYNCAC_SHIFT);
+ clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
- set |= BIT(MDR1_SYNCAC_SHIFT);
+ set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
- tmp = sh_msiof_read(p, TMDR1) & ~clr;
- sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
- tmp = sh_msiof_read(p, RMDR1) & ~clr;
- sh_msiof_write(p, RMDR1, tmp | set);
+ tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+ sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+ tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+ sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
@@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
/* Configure pins before asserting CS */
if (spi->cs_gpiod) {
- ss = p->unused_ss;
+ ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi->chip_select;
@@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
/* setup clock and rx/tx signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
}
@@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
/* shut down frame, rx/tx and clock signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
@@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
fifo_shift = 32 - bits;
/* default FIFO watermarks for PIO */
- sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
- sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
if (tx_buf)
@@ -731,7 +728,7 @@ stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- ier_bits |= IER_RDREQE | IER_RDMAE;
+ ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (tx) {
- ier_bits |= IER_TDREQE | IER_TDMAE;
+ ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
- sh_msiof_write(p, IER, ier_bits);
+ sh_msiof_write(p, SIIER, ier_bits);
reinit_completion(&p->done);
if (tx)
@@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (ret)
goto stop_reset;
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
- sh_msiof_write(p, IER, IER_TEOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@@ -856,7 +853,7 @@ stop_dma:
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
- struct device *dev = &p->pdev->dev;
- unsigned int used_ss_mask = 0;
- unsigned int cs_gpios = 0;
- unsigned int num_cs, i;
- int ret;
-
- ret = gpiod_count(dev, "cs");
- if (ret <= 0)
- return 0;
-
- num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
- for (i = 0; i < num_cs; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (!IS_ERR(gpiod)) {
- devm_gpiod_put(dev, gpiod);
- cs_gpios++;
- continue;
- }
-
- if (PTR_ERR(gpiod) != -ENOENT)
- return PTR_ERR(gpiod);
-
- if (i >= MAX_SS) {
- dev_err(dev, "Invalid native chip select %d\n", i);
- return -EINVAL;
- }
- used_ss_mask |= BIT(i);
- }
- p->unused_ss = ffz(used_ss_mask);
- if (cs_gpios && p->unused_ss >= MAX_SS) {
- dev_err(dev, "No unused native chip select available\n");
- return -EINVAL;
- }
- return 0;
-}
-
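
The removed helper probed each "cs" GPIO to build a mask of native chip selects still in use and picked the spare one with ffz(); the SPI core now derives the same ctlr->unused_native_cs itself once use_gpio_descriptors and max_native_cs are set (see the probe hunk below). A worked ffz() example for the removed logic:

/*
 * With native CS 0 and CS 2 taken (used_ss_mask = 0b101),
 * ffz(0b101) = 1: native CS 1 is the spare select to park on
 * while a GPIO chip select does the real work.
 */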
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id, res->start + TFDR);
+ dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id, res->start + RFDR);
+ dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
@@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* Setup GPIO chip selects */
- ctlr->num_chipselect = p->info->num_chipselect;
- ret = sh_msiof_get_cs_gpios(p);
- if (ret)
- goto err1;
-
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
@@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = MAX_SS;
ret = sh_msiof_request_dma(p);
if (ret < 0)
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index e1e639191557..8419e6722e17 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
- if (!sspi->rx_chan) {
+ sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(sspi->rx_chan)) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->rx_chan);
goto free_master;
}
- sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
- if (!sspi->tx_chan) {
+ sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(sspi->tx_chan)) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->tx_chan);
goto free_rx_dma;
}
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4e726929bb4f..4ef569b47aa6 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
return 0;
}
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
+ int ret = 0;
memset(&dma_cfg, 0, sizeof(dma_cfg));
@@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
- qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
- if (qspi->dma_chrx) {
+ qspi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(qspi->dma_chrx)) {
+ ret = PTR_ERR(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto out;
+ } else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
@@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
- qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
- if (qspi->dma_chtx) {
+ qspi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(qspi->dma_chtx)) {
+ ret = PTR_ERR(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ } else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
@@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
+out:
init_completion(&qspi->dma_completion);
+
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
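
Note the error policy in the helper above: DMA stays optional, so only -EPROBE_DEFER is allowed to fail the probe, while any other request or configuration error leaves the channel pointer NULL and is normalized to 0, letting the controller fall back to indirect (non-DMA) transfers. Condensed, the tail of the helper reduces to this hypothetical sketch:

static int optional_dma_result(int ret)
{
	return ret == -EPROBE_DEFER ? ret : 0;	/* only deferral is fatal */
}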
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
- stm32_qspi_dma_setup(qspi);
+ ret = stm32_qspi_dma_setup(qspi);
+ if (ret)
+ goto err;
+
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index b222ce8d083e..e041f9c4ec47 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- if (!gpio_is_valid(spi_dev->cs_gpio)) {
- dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
- spi_dev->cs_gpio);
- return -EINVAL;
- }
-
- dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
- spi_dev->cs_gpio,
- (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
- ret = gpio_direction_output(spi_dev->cs_gpio,
- !(spi_dev->mode & SPI_CS_HIGH));
-
- return ret;
-}
-
-/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
*/
static int stm32_spi_prepare_msg(struct spi_master *master,
@@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
- int i, ret;
+ int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master) {
@@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
- master->setup = stm32_spi_setup;
+ master->use_gpio_descriptors = true;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
- if (!spi->dma_tx)
+ spi->dma_tx = dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->dma_tx)) {
+ ret = PTR_ERR(spi->dma_tx);
+ spi->dma_tx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
- else
+ } else {
master->dma_tx = spi->dma_tx;
+ }
+
+ spi->dma_rx = dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->dma_rx)) {
+ ret = PTR_ERR(spi->dma_rx);
+ spi->dma_rx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_release;
- spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
- if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
- else
+ } else {
master->dma_rx = spi->dma_rx;
+ }
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
@@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
- goto err_dma_release;
+ goto err_pm_disable;
}
- if (!master->cs_gpios) {
+ if (!master->cs_gpiods) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
- goto err_dma_release;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i])) {
- dev_err(&pdev->dev, "%i is not a valid gpio\n",
- master->cs_gpios[i]);
- ret = -EINVAL;
- goto err_dma_release;
- }
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get CS gpio %i\n",
- master->cs_gpios[i]);
- goto err_dma_release;
- }
+ goto err_pm_disable;
}
dev_info(&pdev->dev, "driver initialized\n");
return 0;
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
-
- pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index fc40ab146c86..83edabdb41ad 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
- tspi->is_packed = 1;
+ tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
- tspi->is_packed = 0;
+ tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 66dcb6128539..366a3e5cca6b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -80,8 +80,6 @@ struct ti_qspi {
#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-#define QSPI_FCLK 192000000
-
/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
@@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
{
int wlen;
unsigned int cmd;
+ u32 rx;
+ u8 rxlen, rx_wlen;
u8 *rxbuf;
rxbuf = t->rx_buf;
@@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
+ rx_wlen = wlen;
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES)
+ rxlen = QSPI_WLEN_MAX_BYTES;
+ else
+ rxlen = min(count, 4);
+ rx_wlen = rxlen << 3;
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(rx_wlen);
+ break;
+ default:
+ rxlen = wlen;
+ break;
+ }
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
+
switch (wlen) {
case 1:
- *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *rxp = (u32 *) rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ *rxp++ = be32_to_cpu(rx);
+ } else {
+ u8 *rxp = rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ if (rx_wlen >= 8)
+ *rxp++ = rx >> (rx_wlen - 8);
+ if (rx_wlen >= 16)
+ *rxp++ = rx >> (rx_wlen - 16);
+ if (rx_wlen >= 24)
+ *rxp++ = rx >> (rx_wlen - 24);
+ if (rx_wlen >= 32)
+ *rxp++ = rx;
+ }
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
- rxbuf += wlen;
- count -= wlen;
+ rxbuf += rxlen;
+ count -= rxlen;
}
return 0;
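
The shifts above unpack the data register MSB-first for sub-word tails. A worked example with a 3-byte tail, so rx_wlen = 24 and, say, rx = 0x00AABBCC:

	rx >> (24 - 8);		/* 0xAA -> rxp[0] */
	rx >> (24 - 16);	/* 0xBB -> rxp[1] */
	rx >> (24 - 24);	/* 0xCC -> rxp[2] (the u8 store truncates the high bits) */
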
@@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
QSPI_SPI_SETUP_REG(spi->chip_select));
}
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ size_t max_len;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.val < qspi->mmap_size) {
+ /* Limit MMIO to the mmaped region */
+ if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+ max_len = qspi->mmap_size - op->addr.val;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ } else {
+ /*
+ * Use fallback mode (SW generated transfers) above the
+ * mmaped region.
+ * Adjust size to comply with the QSPI max frame length.
+ */
+ max_len = QSPI_FRAME;
+ max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ }
+
+ return 0;
+}
+
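
The new ->adjust_op_size() hook is invoked by the spi-mem core before ->exec_op(), so the controller never sees an op larger than it can handle. A hedged usage sketch from the caller's side, where mem is a struct spi_mem and op a struct spi_mem_op filled in elsewhere:

	int ret;

	ret = spi_mem_adjust_op_size(mem, &op);	/* invokes ->adjust_op_size() */
	if (!ret)
		ret = spi_mem_exec_op(mem, &op);	/* invokes ->exec_op() */
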
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
+ .adjust_op_size = ti_qspi_adjust_op_size,
};
static int ti_qspi_start_transfer_one(struct spi_master *master,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 223353fa2d8a..d7ea6af74743 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index ce9b30112e26..0fa50979644d 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@
struct uniphier_spi_priv {
void __iomem *base;
+ dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
@@ -32,6 +34,7 @@ struct uniphier_spi_priv {
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
+ atomic_t dma_busy;
bool is_save_param;
u8 bits_per_word;
@@ -61,11 +64,16 @@ struct uniphier_spi_priv {
#define SSI_FPS_FSTRT BIT(14)
#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)
#define SSI_IE 0x18
+#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_TXRE BIT(2)
+#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
+#define SSI_IE_ALL_MASK GENMASK(4, 0)
#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
@@ -87,15 +95,19 @@ struct uniphier_spi_priv {
#define SSI_RXDR 0x24
#define SSI_FIFO_DEPTH 8U
+#define SSI_FIFO_BURST_NUM 1
+
+#define SSI_DMA_RX_BUSY BIT(1)
+#define SSI_DMA_TX_BUSY BIT(0)
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
writel(val, priv->base + SSI_IE);
}
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
+static bool uniphier_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ if ((!master->dma_tx && !master->dma_rx)
+ || (!master->dma_tx && t->tx_buf)
+ || (!master->dma_rx && t->rx_buf))
+ return false;
+
+ return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+ if (!(state & SSI_DMA_TX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+ if (!(state & SSI_DMA_RX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ int buswidth;
+
+ atomic_set(&priv->dma_busy, 0);
+
+ uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+ if (priv->bits_per_word <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (priv->bits_per_word <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (priv->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = priv->base_dma_addr + SSI_RXDR,
+ .src_addr_width = buswidth,
+ .src_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ master->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto out_err_prep;
+
+ rxdesc->callback = uniphier_spi_dma_rxcb;
+ rxdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+ atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (priv->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = priv->base_dma_addr + SSI_TXDR,
+ .dst_addr_width = buswidth,
+ .dst_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ master->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto out_err_prep;
+
+ txdesc->callback = uniphier_spi_dma_txcb;
+ txdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+ atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ /* signal that we need to wait for completion */
+ return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+
+ return -EINVAL;
+}
+
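
The TX and RX callbacks above coordinate through the dma_busy bitmask: each callback atomically clears its own flag and finalizes the transfer only if the old value shows the other direction has already finished, so finalization happens exactly once. The handshake in isolation (a sketch, not driver code):

	static void dma_done(struct spi_master *master, atomic_t *busy,
			     int my_flag, int other_flag)
	{
		int old = atomic_fetch_andnot(my_flag, busy);	/* returns prior value */

		if (!(old & other_flag))
			spi_finalize_current_transfer(master);	/* last one out */
	}
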
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
uniphier_spi_fill_tx_fifo(priv);
- uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));
- uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
if (!time_left) {
dev_err(dev, "transfer timeout.\n");
@@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
+ bool use_dma;
/* Terminate and return success for 0 byte length transfer */
if (!t->len)
@@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_setup_transfer(spi, t);
+ use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ if (use_dma)
+ return uniphier_spi_transfer_one_dma(master, spi, t);
+
/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
@@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
+static void uniphier_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ u32 val;
+
+ /* stop running spi transfer */
+ writel(0, priv->base + SSI_CTL);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+ dmaengine_terminate_async(master->dma_tx);
+ atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+ }
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+ dmaengine_terminate_async(master->dma_rx);
+ atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+ }
+}
+
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
@@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
+ struct resource *res;
+ struct dma_slave_caps caps;
+ u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
@@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
+ priv->base_dma_addr = res->start;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
+ master->handle_err = uniphier_spi_handle_err;
+ master->can_dma = uniphier_spi_can_dma;
+
master->num_chipselect = 1;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_tx = NULL;
+ dma_tx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_tx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_tx_burst = caps.max_burst;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_rx = NULL;
+ dma_rx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_rx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_rx_burst = caps.max_burst;
+ }
+
+ master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
@@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+ if (priv->master->dma_tx)
+ dma_release_channel(priv->master->dma_tx);
+ if (priv->master->dma_rx)
+ dma_release_channel(priv->master->dma_rx);
+
clk_disable_unprepare(priv->clk);
return 0;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8994545367a2..38b4c78df506 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1674,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
+ if (unlikely(ctlr->ptp_sts_supported)) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
}
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffz(native_cs_mask);
+ if (num_cs_gpios && ctlr->max_native_cs &&
+ ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
}
return 0;
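
A worked example of the new bookkeeping, assuming a hypothetical device tree property cs-gpios = <&gpioa 1>, <0>, <&gpiob 2>: the hole at index 1 denotes a native chip select, so

	unsigned long native_cs_mask = BIT(1);		/* index 1 is native */
	unsigned int unused = ffz(native_cs_mask);	/* = 0 */

unused_native_cs then names a native line (0 here) that no device claims, giving drivers that must still program some native chip select internally a safe value to use.
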
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 06b68dd6e022..bc275968fcc6 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports
for (i = 0; i < 2; i++) {
void __iomem *uart_regs;
- uart_regs = ioremap_nocache(SSB_EUART, 16);
+ uart_regs = ioremap(SSB_EUART, 16);
if (uart_regs) {
uart_regs += (i * 8);
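
This and the following ioremap hunks are mechanical: ioremap_nocache() had long been defined as plain ioremap() on the remaining architectures, so the rename does not change behavior. For instance, with res a placeholder struct resource:

	void __iomem *regs;

	regs = ioremap(res->start, resource_size(res));	/* was ioremap_nocache() */
	if (!regs)
		return -ENOMEM;
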
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 6a5622e0ded5..c1186415896b 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
+ ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
set_io_port_base(ssb_pcicore_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
* values. Not waiting at this point causes crashes of the machine. */
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index 673d732dcb8f..8f398b30f5bf 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!rv)
- return -ENODATA;
-
/* Second, find the set of routes valid for this device. */
for (i = 0; ni_device_routes_list[i]; ++i) {
if (memcmp(ni_device_routes_list[i]->device, board_name,
@@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!dr)
- return -ENODATA;
-
tables->route_values = rv;
tables->valid_routes = dr;
+ if (!rv || !dr)
+ return -ENODATA;
+
return 0;
}
@@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest,
{
int src;
+ if (!tables->route_values)
+ return -EINVAL;
+
dest = B(dest); /* subtract NI names offset */
/* ensure we are not going to under/over run the route value table */
if (dest < 0 || dest >= NI_NUM_NAMES)
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index cd8be80d2076..be6b50f454b4 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
}
gasket_dev->bar_data[bar_num].virt_base =
- ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+ ioremap(gasket_dev->bar_data[bar_num].phys_base,
gasket_dev->bar_data[bar_num].length_bytes);
if (!gasket_dev->bar_data[bar_num].virt_base) {
dev_err(gasket_dev->dev,
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 0a23727d0dc3..93cf28febdf6 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
- pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
+ pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE);
if (!pcard->regs_bar_base) {
dev_err(&pcard->pdev->dev,
"probe: REG_BAR could not remap memory to virtual space\n");
@@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
- pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr,
+ pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
dma_bar_phys_len);
if (!pcard->dma_bar_base) {
dev_err(&pcard->pdev->dev,
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index 5460bf973c9c..592099a1fca5 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -659,7 +659,7 @@ static int pi2c_probe(struct platform_device *pldev)
if (!res)
return -ENXIO;
- priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev,
+ priv->smba = (unsigned long)devm_ioremap(&pldev->dev,
res->start,
resource_size(res));
if (!priv->smba)
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 8becf972af9c..1c360daa703d 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev)
goto free_master;
}
- kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start,
+ kpspi->base = devm_ioremap(&pldev->dev, r->start,
resource_size(r));
status = spi_register_master(master);
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index a05ae6d40db9..ec79a8500caf 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -122,7 +122,7 @@ int kpc_dma_probe(struct platform_device *pldev)
rv = -ENXIO;
goto err_kfree;
}
- ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+ ldev->eng_regs = ioremap(r->start, resource_size(r));
if (!ldev->eng_regs) {
dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__);
rv = -ENXIO;
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 3cffc8be6656..211dd4a11cac 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -45,7 +45,7 @@ struct wep_key {
* function prototypes
*/
static int ks_wlan_open(struct net_device *dev);
-static void ks_wlan_tx_timeout(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ks_wlan_close(struct net_device *dev);
static void ks_wlan_set_rx_mode(struct net_device *dev);
@@ -2498,7 +2498,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
}
static
-void ks_wlan_tx_timeout(struct net_device *dev)
+void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ks_wlan_private *priv = netdev_priv(dev);
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
index 6f0cd0784786..3be41698df4c 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev)
"regs resource missing from device tree\n");
return -EINVAL;
}
- regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (IS_ERR(regs)) {
dev_err(&pdev->dev, "failed to map registers\n");
return PTR_ERR(regs);
@@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev)
"sram resource missing from device tree\n");
return -EINVAL;
}
- sram_regs = devm_ioremap_nocache(&pdev->dev,
+ sram_regs = devm_ioremap(&pdev->dev,
sram_res->start,
resource_size(sram_res));
if (IS_ERR(sram_regs)) {
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6ad4515311f7..ed5440d51332 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -4274,7 +4274,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
return status;
}
-static void qlge_tx_timeout(struct net_device *ndev)
+static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
@@ -4455,7 +4455,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
- ioremap_nocache(pci_resource_start(pdev, 1),
+ ioremap(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
@@ -4465,7 +4465,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
- ioremap_nocache(pci_resource_start(pdev, 3),
+ ioremap(pci_resource_start(pdev, 3),
pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index dace81a7d1ba..60c652793f93 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -267,7 +267,7 @@ static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
return 0;
}
-static void _rtl92e_tx_timeout(struct net_device *dev)
+static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2463,7 +2463,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
}
- ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+ ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
if (ioaddr == (unsigned long)NULL) {
netdev_err(dev, "ioremap failed!");
goto err_rel_mem;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 7e2cabd16e88..482382a887f8 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -640,7 +640,7 @@ short check_nic_enough_desc(struct net_device *dev, int queue_index)
return (used < MAX_TX_URB);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = ieee80211_priv(dev);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index cb95ad6fa4f9..fbb42e5258fd 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -858,7 +858,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev_info(&pci->dev, "Resource length: 0x%x\n",
(unsigned int)pci_resource_len(pci, 0));
dev->addr = pci_resource_start(pci, 0);
- dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+ dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
if (!dev->remap_addr) {
dev_err(&pci->dev, "ioremap error\n");
err = -ENXIO;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index ea1d3d4efbc2..b8d60701f898 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -50,7 +50,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
/* now map mmio and vidmem */
- sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start,
+ sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 1d1440d43002..0433536930a9 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1078,7 +1078,7 @@ out_save_flags:
* Queue the work and return. Make sure we have not already been informed that
* the IO Partition is gone; if so, we will have already timed-out the xmits.
*/
-static void visornic_xmit_timeout(struct net_device *netdev)
+static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags;
diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c
index 34020ed351ab..a5ab255d7d36 100644
--- a/drivers/staging/uwb/whc-rc.c
+++ b/drivers/staging/uwb/whc-rc.c
@@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc)
goto error_request_region;
}
- whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+ whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
if (whcrc->rc_base == NULL) {
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
- goto error_ioremap_nocache;
+ goto error_ioremap;
}
result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
@@ -254,7 +254,7 @@ error_cmd_buffer:
free_irq(umc_dev->irq, whcrc);
error_request_irq:
iounmap(whcrc->rc_base);
-error_ioremap_nocache:
+error_ioremap:
release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
return result;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a70fb84f38f1..b809c0015c0c 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -101,7 +101,7 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev);
static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
-static void p80211knetdev_tx_timeout(struct net_device *netdev);
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
int wlan_watchdog = 5000;
@@ -1074,7 +1074,7 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
return drop;
}
-static void p80211knetdev_tx_timeout(struct net_device *netdev)
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct wlandevice *wlandev = netdev->ml_priv;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7251a87bb576..b94ed4e30770 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4149,9 +4149,6 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- if (conn->conn_transport->iscsit_wait_conn)
- conn->conn_transport->iscsit_wait_conn(conn);
-
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4237,6 +4234,9 @@ int iscsit_close_connection(
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index cf3fad2cb871..c5b17dd8f587 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
for (slot = 0; slot < tbus->num_tcslots; slot++) {
slotaddr = tbus->slot_base + slot * slotsize;
extslotaddr = tbus->ext_slot_base + slot * extslotsize;
- module = ioremap_nocache(slotaddr, slotsize);
+ module = ioremap(slotaddr, slotsize);
BUG_ON(!module);
offset = TC_OLDCARD;
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 676ffcb64985..8da63f38e6bd 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,7 +2,7 @@
# Generic Trusted Execution Environment Configuration
config TEE
tristate "Trusted Execution Environment support"
- depends on HAVE_ARM_SMCCC || COMPILE_TEST
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -14,7 +14,7 @@ if TEE
menu "TEE drivers"
source "drivers/tee/optee/Kconfig"
-
+source "drivers/tee/amdtee/Kconfig"
endmenu
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 21f51fd88b07..68da044afbfa 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -4,3 +4,4 @@ tee-objs += tee_core.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000000..4e32b6413b41
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000000..ff1485266117
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000000..ff48c3e47375
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file contains definitions for the interface between the host and the
+ * AMD-TEE Trusted OS. These definitions must match the definitions on the
+ * TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be same as in GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between the rich world and the secure world may be
+ * physically non-contiguous. The structures below describe a shared memory
+ * region via a scatter/gather (sg) list.
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
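
The parameter-type macros above pack one 4-bit type per slot. A worked example, assuming one value input in slot 0 and one in/out memref in slot 1:

	u32 types = TEE_PARAM_TYPES(TEE_OP_PARAM_TYPE_VALUE_INPUT,
				    TEE_OP_PARAM_TYPE_MEMREF_INOUT,
				    TEE_OP_PARAM_TYPE_NONE,
				    TEE_OP_PARAM_TYPE_NONE);
	/* types == 0x0071; TEE_PARAM_TYPE_GET(types, 1) == 7 */
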
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000000..d7f798c3394b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array of pointers to TA-allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * The session structure is updated on open_session, and this information is
+ * used for subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+struct amdtee_shm_context {
+ struct list_head shmdata_list;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * The lower two bytes of the session identifier represent the TA handle, and
+ * the upper two bytes represent the session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
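
The session-identifier helpers above pack a 16-bit session index above a 16-bit TA handle. For example:

	u32 session;

	set_session_id(0x1234, 5, &session);	/* session == 0x00051234 */
	get_ta_handle(session);			/* == 0x1234 */
	get_session_index(session);		/* == 5 */
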
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000000..096dd4d92d39
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+ /* AMD TEE does not support meta parameter */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values fit in 32 bits */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+ pr_warn("%s: Discarding value c", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for both
+ * the open_session and invoke_cmd calls.
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count ; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+ pr_err("map shared memory: page unaligned. addr 0x%llx",
+ (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count ; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_load_ta cmd = {0};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+ pr_err("load TA: page unaligned. blob 0x%llx", blob);
+ return -EINVAL;
+ }
+
+ cmd.hi_addr = upper_32_bits(blob);
+ cmd.low_addr = lower_32_bits(blob);
+ cmd.size = size;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ set_session_id(cmd.ta_handle, 0, &arg->session);
+ }
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
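
Every handler in this file follows the same shape: marshal a fixed-size command struct, hand it to the PSP via psp_tee_process_cmd(), then fold a non-zero TEE status into an errno, while arg->ret (where present) carries the GlobalPlatform result. The skeleton, reduced to its essentials (the unload-TA command ID and struct are reused here purely for illustration):

	static int submit_cmd(u32 ta_handle)
	{
		struct tee_cmd_unload_ta cmd = { .ta_handle = ta_handle };
		u32 status;
		int ret;

		ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, &cmd, sizeof(cmd),
					  &status);
		if (!ret && status)	/* transport ok, TEE rejected it */
			ret = -EBUSY;

		return ret;
	}
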
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000000..6370bb55f512
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+static struct amdtee_shm_context shmctx;
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&shmctx.shmdata_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open session */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ }
+
+ /* Unload Trusted Application once all sessions are closed */
+ handle_unload_ta(sess->ta_handle);
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check if the TA is already loaded
+ * into the TEE. If yes, returns the 'session' structure for that TA. Else
+ * allocates and initializes a new 'session' structure and adds it to the
+ * context's session list.
+ *
+ * The caller must hold the session_list_mutex.
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+ /* Scan session list to check if TA is already loaded into TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ u32 buf_id = 0;
+ struct amdtee_shm_data *shmdata;
+
+ list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+ /* __get_free_pages() returns 0, not an ERR_PTR, on failure */
+ if (!*ta) {
+ pr_err("%s: get_free_pages failed\n", __func__);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret == TEEC_SUCCESS) {
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+ }
+
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS)
+ set_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+
+ if (arg->ret == TEEC_SUCCESS) {
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
+ } else {
+ pr_err("open_session failed %d\n", arg->ret);
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+out:
+ free_pages((unsigned long)ta, get_order(ta_size));
+ return rc;
+}
+
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+
+ kref_put(&sess->refcount, destroy_session);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct shmem_desc shmem;
+ struct amdtee_shm_data *shmnode;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+ /*
+ * Send a MAP command to TEE and get the corresponding
+ * buffer Id
+ */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000000..065854e2db18
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+ /* Map the allocated memory in to TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index d1ad512e1708..3ca71e3812ed 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -3,6 +3,7 @@
config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
+ depends on MMU
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index 0332a5301d61..d767eebf30bd 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
shm->size = PAGE_SIZE << order;
if (shm->flags & TEE_SHM_DMA_BUF) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
+
shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+ rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
+ kfree(pages);
}
return rc;
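
The hunk above registers every page of the high-order allocation with OP-TEE instead of only the first one. Because the pages behind a single __get_free_pages() call are physically contiguous, their struct page descriptors are consecutive, so the array can be filled with plain pointer arithmetic. A standalone sketch of the same pattern (alloc_page_array() is a hypothetical helper, not part of this patch):

static struct page **alloc_page_array(struct page *first, unsigned int order)
{
	unsigned int i, nr_pages = 1 << order;
	struct page **pages;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* descriptors of a contiguous allocation are consecutive */
	for (i = 0; i < nr_pages; i++)
		pages[i] = first + i;

	return pages;
}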
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 79b27865c6f4..5a05db5438d6 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -151,16 +151,33 @@ config THERMAL_GOV_POWER_ALLOCATOR
config CPU_THERMAL
bool "Generic cpu cooling support"
- depends on CPU_FREQ
depends on THERMAL_OF
help
+ Enable the CPU cooling features. If the system has no active
+ cooling device available, this option allows the CPU to be used
+ as a cooling device.
+
+if CPU_THERMAL
+
+config CPU_FREQ_THERMAL
+ bool "CPU frequency cooling device"
+ depends on CPU_FREQ
+ default y
+ help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
(drivers/acpi/processor_thermal.c).
This will be useful for platforms using the generic thermal interface
and not the ACPI interface.
- If you want this support, you should say Y here.
+config CPU_IDLE_THERMAL
+ bool "CPU idle cooling device"
+ depends on IDLE_INJECT
+ help
+ This implements the CPU cooling mechanism through
+ idle injection. This will throttle the CPU by injecting
+ idle cycles.
+endif
config CLOCK_THERMAL
bool "Generic clock cooling support"
@@ -263,6 +280,20 @@ config SPEAR_THERMAL
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework.
+config SUN8I_THERMAL
+ tristate "Allwinner sun8i thermal driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on NVMEM
+ depends on OF
+ depends on RESET_CONTROLLER
+ help
+ Support for the sun8i thermal sensor in the Linux thermal
+ framework.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun8i-thermal.
+
config ROCKCHIP_THERMAL
tristate "Rockchip thermal driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index baeb70bf0568..9fb88e26fb10 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -19,7 +19,8 @@ thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
-thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_FREQ_THERMAL) += cpufreq_cooling.o
+thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o
# clock cooling
thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
@@ -31,6 +32,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
obj-y += broadcom/
obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 8a9e9bc421c6..ccb1fe18e993 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -67,7 +67,11 @@
/**
* struct amlogic_thermal_soc_calib_data
- * @A, B, m, n: calibration parameters
+ * @A: calibration parameter
+ * @B: calibration parameter
+ * @m: calibration parameter
+ * @n: calibration parameter
+ *
* This structure is required for configuration of amlogic thermal driver.
*/
struct amlogic_thermal_soc_calib_data {
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 709a22f455e9..8c4d1244ee7a 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -155,6 +155,9 @@ static void armadaxp_init(struct platform_device *pdev,
regmap_write(priv->syscon, data->syscon_control1_off, reg);
+ reg &= ~PMU_TDC0_SW_RST_MASK;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
+
/* Enable the sensor */
regmap_read(priv->syscon, data->syscon_status_off, &reg);
reg &= ~PMU_TM_DISABLE_MASK;
@@ -578,7 +581,7 @@ static const struct armada_thermal_data armadaxp_data = {
.coef_m = 10000000ULL,
.coef_div = 13825,
.syscon_status_off = 0xb0,
- .syscon_control1_off = 0xd0,
+ .syscon_control1_off = 0x2d0,
};
static const struct armada_thermal_data armada370_data = {
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index cf43e1520de9..061f1db6edc9 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config BCM2711_THERMAL
+ tristate "Broadcom AVS RO thermal sensor driver"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on THERMAL_OF && MFD_SYSCON
+ help
+ Support for thermal sensors on Broadcom BCM2711 SoCs.
+
config BCM2835_THERMAL
tristate "Thermal sensors on bcm2835 SoC"
depends on ARCH_BCM2835 || COMPILE_TEST
diff --git a/drivers/thermal/broadcom/Makefile b/drivers/thermal/broadcom/Makefile
index 490ab1f7a86d..c917b243d5f0 100644
--- a/drivers/thermal/broadcom/Makefile
+++ b/drivers/thermal/broadcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM2711_THERMAL) += bcm2711_thermal.o
obj-$(CONFIG_BCM2835_THERMAL) += bcm2835_thermal.o
obj-$(CONFIG_BRCMSTB_THERMAL) += brcmstb_thermal.o
obj-$(CONFIG_BCM_NS_THERMAL) += ns-thermal.o
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
new file mode 100644
index 000000000000..67c2a737bc9d
--- /dev/null
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Broadcom AVS RO thermal sensor driver
+ *
+ * based on brcmstb_thermal
+ *
+ * Copyright (C) 2020 Stefan Wahren
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#include "../thermal_hwmon.h"
+
+#define AVS_RO_TEMP_STATUS 0x200
+#define AVS_RO_TEMP_STATUS_VALID_MSK (BIT(16) | BIT(10))
+#define AVS_RO_TEMP_STATUS_DATA_MSK GENMASK(9, 0)
+
+struct bcm2711_thermal_priv {
+ struct regmap *regmap;
+ struct thermal_zone_device *thermal;
+};
+
+static int bcm2711_get_temp(void *data, int *temp)
+{
+ struct bcm2711_thermal_priv *priv = data;
+ int slope = thermal_zone_get_slope(priv->thermal);
+ int offset = thermal_zone_get_offset(priv->thermal);
+ u32 val;
+ int ret;
+ long t;
+
+ ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & AVS_RO_TEMP_STATUS_VALID_MSK))
+ return -EIO;
+
+ val &= AVS_RO_TEMP_STATUS_DATA_MSK;
+
+ /* Convert a HW code to a temperature reading (millidegree celsius) */
+ t = slope * val + offset;
+
+ *temp = t < 0 ? 0 : t;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = {
+ .get_temp = bcm2711_get_temp,
+};
+
+static const struct of_device_id bcm2711_thermal_id_table[] = {
+ { .compatible = "brcm,bcm2711-thermal" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2711_thermal_id_table);
+
+static int bcm2711_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ struct bcm2711_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *parent;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* get regmap from syscon node */
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+ priv->regmap = regmap;
+
+ thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv,
+ &bcm2711_thermal_of_ops);
+ if (IS_ERR(thermal)) {
+ ret = PTR_ERR(thermal);
+ dev_err(dev, "could not register sensor: %d\n", ret);
+ return ret;
+ }
+
+ priv->thermal = thermal;
+
+ thermal->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(thermal);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver bcm2711_thermal_driver = {
+ .probe = bcm2711_thermal_probe,
+ .driver = {
+ .name = "bcm2711_thermal",
+ .of_match_table = bcm2711_thermal_id_table,
+ },
+};
+module_platform_driver(bcm2711_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Wahren");
+MODULE_DESCRIPTION("Broadcom AVS RO thermal sensor driver");
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 5825ac581f56..8df5edef1ded 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -49,7 +49,7 @@
#define AVS_TMON_TP_TEST_ENABLE 0x20
/* Default coefficients */
-#define AVS_TMON_TEMP_SLOPE -487
+#define AVS_TMON_TEMP_SLOPE 487
#define AVS_TMON_TEMP_OFFSET 410040
/* HW related temperature constants */
@@ -102,29 +102,28 @@ static struct avs_tmon_trip avs_tmon_trips[] = {
},
};
+struct brcmstb_thermal_params {
+ unsigned int offset;
+ unsigned int mult;
+ const struct thermal_zone_of_device_ops *of_ops;
+};
+
struct brcmstb_thermal_priv {
void __iomem *tmon_base;
struct device *dev;
struct thermal_zone_device *thermal;
+ /* Process specific thermal parameters used for calculations */
+ const struct brcmstb_thermal_params *temp_params;
};
-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
- int *offset)
-{
- *slope = thermal_zone_get_slope(tz);
- *offset = thermal_zone_get_offset(tz);
-}
-
/* Convert a HW code to a temperature reading (millidegree celsius) */
-static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+static inline int avs_tmon_code_to_temp(struct brcmstb_thermal_priv *priv,
u32 code)
{
- const int val = code & AVS_TMON_TEMP_MASK;
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
- avs_tmon_get_coeffs(tz, &slope, &offset);
-
- return slope * val + offset;
+ return (offset - (int)((code & AVS_TMON_TEMP_MASK) * mult));
}
/*
@@ -133,23 +132,22 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
* @temp: temperature to convert
* @low: if true, round toward the low side
*/
-static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
int temp, bool low)
{
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
if (temp < AVS_TMON_TEMP_MIN)
- return AVS_TMON_TEMP_MAX; /* Maximum code value */
-
- avs_tmon_get_coeffs(tz, &slope, &offset);
+ return AVS_TMON_TEMP_MAX; /* Maximum code value */
if (temp >= offset)
return 0; /* Minimum code value */
if (low)
- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
+ return (u32)(DIV_ROUND_UP(offset - temp, mult));
else
- return (u32)((offset - temp) / abs(slope));
+ return (u32)((offset - temp) / mult);
}
static int brcmstb_get_temp(void *data, int *temp)
@@ -167,7 +165,7 @@ static int brcmstb_get_temp(void *data, int *temp)
val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
- t = avs_tmon_code_to_temp(priv->thermal, val);
+ t = avs_tmon_code_to_temp(priv, val);
if (t < 0)
*temp = 0;
else
@@ -201,7 +199,7 @@ static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
val &= trip->reg_msk;
val >>= trip->reg_shift;
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
@@ -214,7 +212,7 @@ static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
/* round toward low temp for the low interrupt */
- val = avs_tmon_temp_to_code(priv->thermal, temp,
+ val = avs_tmon_temp_to_code(priv, temp,
type == TMON_TRIP_TYPE_LOW);
val <<= trip->reg_shift;
@@ -231,7 +229,7 @@ static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
u32 val;
val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
@@ -290,19 +288,37 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = {
+ .get_temp = brcmstb_get_temp,
+};
+
+static const struct brcmstb_thermal_params brcmstb_16nm_params = {
+ .offset = 457829,
+ .mult = 557,
+ .of_ops = &brcmstb_16nm_of_ops,
+};
+
+static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
+static const struct brcmstb_thermal_params brcmstb_28nm_params = {
+ .offset = 410040,
+ .mult = 487,
+ .of_ops = &brcmstb_28nm_of_ops,
+};
+
static const struct of_device_id brcmstb_thermal_id_table[] = {
- { .compatible = "brcm,avs-tmon" },
+ { .compatible = "brcm,avs-tmon-bcm7216", .data = &brcmstb_16nm_params },
+ { .compatible = "brcm,avs-tmon", .data = &brcmstb_28nm_params },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
+ const struct thermal_zone_of_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
struct resource *res;
@@ -312,6 +328,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->temp_params = of_device_get_match_data(&pdev->dev);
+ if (!priv->temp_params)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->tmon_base))
@@ -319,9 +339,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);
+ of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
- &of_ops);
+ of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
@@ -331,16 +352,15 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->thermal = thermal;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not get IRQ\n");
- return irq;
- }
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- brcmstb_tmon_irq_thread, IRQF_ONESHOT,
- DRV_NAME, priv);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
- return ret;
+ if (irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ brcmstb_tmon_irq_thread,
+ IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
+ return ret;
+ }
}
dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
index 3ad3256c48fd..7cb3ae4b44ee 100644
--- a/drivers/thermal/clock_cooling.c
+++ b/drivers/thermal/clock_cooling.c
@@ -7,7 +7,7 @@
* Copyright (C) 2013 Texas Instruments Inc.
* Contact: Eduardo Valentin <eduardo.valentin@ti.com>
*
- * Highly based on cpu_cooling.c.
+ * Highly based on cpufreq_cooling.c.
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 52569b27b426..fe83d7a210d4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/drivers/thermal/cpu_cooling.c
+ * linux/drivers/thermal/cpufreq_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
*
@@ -63,6 +63,7 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
+ * @qos_req: PM QoS constraint to apply
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -620,7 +621,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct thermal_cooling_device *cdev = NULL;
if (!np) {
- pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
policy->cpu);
return NULL;
}
@@ -630,7 +631,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
cdev = __cpufreq_cooling_register(np, policy, em);
if (IS_ERR(cdev)) {
- pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
+ pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
policy->cpu, PTR_ERR(cdev));
cdev = NULL;
}
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
new file mode 100644
index 000000000000..0bb843246f59
--- /dev/null
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Limited.
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#include <linux/cpu_cooling.h>
+#include <linux/cpuidle.h>
+#include <linux/err.h>
+#include <linux/idle_inject.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+/**
+ * struct cpuidle_cooling_device - data for the idle cooling device
+ * @ii_dev: the idle injection device backing this cooling device
+ * @state: a normalized integer giving the state of the cooling device
+ */
+struct cpuidle_cooling_device {
+ struct idle_inject_device *ii_dev;
+ unsigned long state;
+};
+
+static DEFINE_IDA(cpuidle_ida);
+
+/**
+ * cpuidle_cooling_runtime - Running time computation
+ * @idle_duration_us: the fixed idle injection duration, in microseconds
+ * @state: the idle injection ratio, as a percentage
+ *
+ * The running duration is computed from the idle injection duration
+ * which is fixed. If we reach 100% of idle injection ratio, that
+ * means the running duration is zero. If we have a 50% ratio
+ * injection, that means we have equal duration for idle and for
+ * running duration.
+ *
+ * The formula is deduced as follows:
+ *
+ * running = idle x ((100 / ratio) - 1)
+ *
+ * For precision with integer math, we use the following:
+ *
+ * running = (idle x 100) / ratio - idle
+ *
+ * For example, with a 50% injection ratio and a 10ms idle duration,
+ * we end up with 10ms of idle injection and 10ms of running time.
+ *
+ * Return: An unsigned int for a usec based runtime duration.
+ */
+static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us,
+ unsigned long state)
+{
+ if (!state)
+ return 0;
+
+ return ((idle_duration_us * 100) / state) - idle_duration_us;
+}
+
+/**
+ * cpuidle_cooling_get_max_state - Get the maximum state
+ * @cdev : the thermal cooling device
+ * @state : a pointer to the state variable to be filled
+ *
+ * The function always returns 100 as the maximum injection ratio. It
+ * is percentile based for consistency across different platforms.
+ *
+ * Return: The function cannot fail, it always returns zero
+ */
+static int cpuidle_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ /*
+ * Depending on the configuration or the hardware, the running
+ * cycle and the idle cycle could be different. We want to
+ * unify that to a 0..100 interval, so the set state
+ * interface will be the same whatever the platform is.
+ *
+ * The state 100% will make the cluster 100% ... idle. A 0%
+ * injection ratio means no idle injection at all and 50%
+ * means for 10ms of idle injection, we have 10ms of running
+ * time.
+ */
+ *state = 100;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_get_cur_state - Get the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: a pointer to the state
+ *
+ * The function just copies the state value from the private thermal
+ * cooling device structure, the mapping is 1 <-> 1.
+ *
+ * Return: The function cannot fail, it always returns zero
+ */
+static int cpuidle_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+
+ *state = idle_cdev->state;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_set_cur_state - Set the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: the target state
+ *
+ * The function first updates the injection durations for the new
+ * state, then starts idle injection when mitigation begins (the state
+ * goes from zero to nonzero) and stops it when mitigation ends. In
+ * any case, it updates the internal state of the cooling device.
+ *
+ * Return: The function cannot fail, it always returns zero
+ */
+static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+ struct idle_inject_device *ii_dev = idle_cdev->ii_dev;
+ unsigned long current_state = idle_cdev->state;
+ unsigned int runtime_us, idle_duration_us;
+
+ idle_cdev->state = state;
+
+ idle_inject_get_duration(ii_dev, &runtime_us, &idle_duration_us);
+
+ runtime_us = cpuidle_cooling_runtime(idle_duration_us, state);
+
+ idle_inject_set_duration(ii_dev, runtime_us, idle_duration_us);
+
+ if (current_state == 0 && state > 0) {
+ idle_inject_start(ii_dev);
+ } else if (current_state > 0 && !state) {
+ idle_inject_stop(ii_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * cpuidle_cooling_ops - thermal cooling device ops
+ */
+static struct thermal_cooling_device_ops cpuidle_cooling_ops = {
+ .get_max_state = cpuidle_cooling_get_max_state,
+ .get_cur_state = cpuidle_cooling_get_cur_state,
+ .set_cur_state = cpuidle_cooling_set_cur_state,
+};
+
+/**
+ * cpuidle_of_cooling_register - Idle cooling device initialization function
+ * @np: a node pointer to a device tree cooling device node
+ * @drv: a cpuidle driver structure pointer
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and registering it with the thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_of_cooling_register(struct device_node *np,
+ struct cpuidle_driver *drv)
+{
+ struct idle_inject_device *ii_dev;
+ struct cpuidle_cooling_device *idle_cdev;
+ struct thermal_cooling_device *cdev;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int id, ret;
+
+ idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL);
+ if (!idle_cdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto out_kfree;
+ }
+
+ ii_dev = idle_inject_register(drv->cpumask);
+ if (!ii_dev) {
+ ret = -EINVAL;
+ goto out_id;
+ }
+
+ idle_inject_set_duration(ii_dev, TICK_USEC, TICK_USEC);
+
+ idle_cdev->ii_dev = ii_dev;
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id);
+
+ cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev,
+ &cpuidle_cooling_ops);
+ if (IS_ERR(cdev)) {
+ ret = PTR_ERR(cdev);
+ goto out_unregister;
+ }
+
+ return 0;
+
+out_unregister:
+ idle_inject_unregister(ii_dev);
+out_id:
+ ida_simple_remove(&cpuidle_ida, id);
+out_kfree:
+ kfree(idle_cdev);
+out:
+ return ret;
+}
+
+/**
+ * cpuidle_cooling_register - Idle cooling device initialization function
+ * @drv: a cpuidle driver structure pointer
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and registering it with the thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_cooling_register(struct cpuidle_driver *drv)
+{
+ return cpuidle_of_cooling_register(NULL, drv);
+}
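
Plugging a few states into cpuidle_cooling_runtime() shows how the injection ratio maps to run time. The values below use the 10ms idle duration from the comment's example; the driver itself registers with TICK_USEC for both durations:

	cpuidle_cooling_runtime(10000, 100);	/* 0 us: always idle */
	cpuidle_cooling_runtime(10000, 50);	/* 10000 us: 50/50 duty cycle */
	cpuidle_cooling_runtime(10000, 25);	/* 30000 us: 25% idle */
	cpuidle_cooling_runtime(10000, 0);	/* 0 us: injection is stopped */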
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 372dbbaaafb8..21d4d6e6409a 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
next_low, next_high);
- dev_info(&th->tz->device,
- "PRCMU set max %ld, min %ld\n", next_high, next_low);
+ dev_dbg(&th->tz->device,
+ "PRCMU set max %ld, min %ld\n", next_high, next_low);
} else if (idx == num_points - 1)
/* So we roof out 1 degree over the max point */
th->interpolated_temp = db8500_thermal_points[idx] + 1;
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index ef59256887ff..a87d4fa031c8 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -53,6 +53,7 @@ static DEFINE_IDA(devfreq_ida);
* 'utilization' (which is 'busy_time / 'total_time').
* The 'res_util' range is from 100 to (power_table[state] * 100)
* for the corresponding 'state'.
+ * @capped_state: index to cooling state within the dynamic power budget
*/
struct devfreq_cooling_device {
int id;
@@ -587,7 +588,7 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_register);
/**
* devfreq_cooling_unregister() - Unregister devfreq cooling device.
- * @dfc: Pointer to devfreq cooling device to unregister.
+ * @cdev: Pointer to devfreq cooling device to unregister.
*/
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index afd99f668c65..aaa07180ab48 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -58,8 +58,8 @@ static long get_target_state(struct thermal_zone_device *tz,
/**
* fair_share_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index b831fc77cf64..991a1c54296d 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -71,8 +71,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* bang_bang_control - controls devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - the trip point
+ * @tz: thermal_zone_device
+ * @trip: the trip point
*
* Regulation Logic: a two point regulation, deliver cooling state depending
* on the previous state shown in this diagram:
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 3517883b5cdb..efae0c02d898 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3400_thermal_match[] = {
+ {"INT1040", 0},
{"INT3400", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index a7bbd8584ae2..aeece1e136a5 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT1043", 0},
{"INT3403", 0},
{"", 0},
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 89a015387283..b1fd34516e28 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -42,6 +42,9 @@
/* IceLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_ICL_THERMAL 0x8a03
+/* JasperLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -724,6 +727,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_GLK_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index 4f0bb8f502e1..ed75a0c603e7 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -23,6 +23,7 @@
#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */
#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
+#define PCH_THERMAL_DID_CML_H 0X06F9 /* CML-H PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -272,6 +273,7 @@ enum board_ids {
board_wpt,
board_skl,
board_cnl,
+ board_cml,
};
static const struct board_info {
@@ -294,6 +296,10 @@ static const struct board_info {
.name = "pch_cannonlake",
.ops = &pch_dev_ops_wpt,
},
+ [board_cml] = {
+ .name = "pch_cometlake",
+ .ops = &pch_dev_ops_wpt,
+ }
};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
@@ -365,7 +371,7 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
thermal_zone_device_unregister(ptd->tzd);
iounmap(ptd->hw_base);
pci_set_drvdata(pdev, NULL);
- pci_release_region(pdev, 0);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -398,6 +404,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
.driver_data = board_cnl, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
.driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H),
+ .driver_data = board_cml, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 88fd0fbe0cfa..82d06c7411eb 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -33,7 +33,7 @@ struct max77620_therm_info {
/**
* max77620_thermal_read_temp: Read PMIC die temperature.
* @data: Device specific data.
- * temp: Temperature in millidegrees Celsius
+ * @temp: Temperature in millidegrees Celsius
*
* The actual temperature of PMIC die is not available from PMIC.
* PMIC only tells the status if it has crossed or not the threshold level
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index acf4854cbb8b..76e30603d4d5 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -358,7 +358,7 @@ static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
-/**
+/*
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
* temperature sensors. We use each bank to measure a certain area of the
@@ -400,7 +400,7 @@ static const struct mtk_thermal_data mt8173_thermal_data = {
.sensor_mux_values = mt8173_mux_values,
};
-/**
+/*
* The MT2701 thermal controller has one bank, which can read up to
* three temperature sensors simultaneously. The MT2701 has a total of 3
* temperature sensors.
@@ -430,7 +430,7 @@ static const struct mtk_thermal_data mt2701_thermal_data = {
.sensor_mux_values = mt2701_mux_values,
};
-/**
+/*
* The MT2712 thermal controller has one bank, which can read up to
* four temperature sensors simultaneously. The MT2712 has a total of 4
* temperature sensors.
@@ -484,7 +484,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
.sensor_mux_values = mt7622_mux_values,
};
-/**
+/*
* The MT8183 thermal controller has one bank for the current SW framework.
* The MT8183 has a total of 6 temperature sensors.
* There are two thermal controller to control the six sensor.
@@ -495,7 +495,6 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
-
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
.num_banks = MT8183_NUM_SENSORS_PER_ZONE,
@@ -519,7 +518,8 @@ static const struct mtk_thermal_data mt8183_thermal_data = {
/**
* raw_to_mcelsius - convert a raw ADC value to mcelsius
- * @mt: The thermal controller
+ * @mt: The thermal controller
+ * @sensno: sensor number
* @raw: raw ADC value
*
* This converts the raw ADC value to mcelsius using the SoC specific
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index dc5093be553e..ef0baa954ff0 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -493,7 +493,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (!dev || !dev->of_node) {
of_node_put(np);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
}
sensor_np = of_node_get(dev->of_node);
@@ -754,7 +754,7 @@ end:
return ret;
}
-/**
+/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
* into the device tree binding of 'trip', property type.
*/
@@ -977,7 +977,7 @@ free_tz:
return ERR_PTR(ret);
}
-static inline void of_thermal_free_zone(struct __thermal_zone *tz)
+static __init void of_thermal_free_zone(struct __thermal_zone *tz)
{
struct __thermal_bind_params *tbp;
int i, j;
@@ -999,6 +999,38 @@ static inline void of_thermal_free_zone(struct __thermal_zone *tz)
}
/**
+ * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ *
+ * Finds all zones parsed and added to the thermal framework and remove them
+ * from the system, together with their resources.
+ *
+ */
+static __init void of_thermal_destroy_zones(void)
+{
+ struct device_node *np, *child;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_debug("unable to find thermal zones\n");
+ return;
+ }
+
+ for_each_available_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+
+ zone = thermal_zone_get_zone_by_name(child->name);
+ if (IS_ERR(zone))
+ continue;
+
+ thermal_zone_device_unregister(zone);
+ kfree(zone->tzp);
+ kfree(zone->ops);
+ of_thermal_free_zone(zone->devdata);
+ }
+ of_node_put(np);
+}
+
+/**
* of_parse_thermal_zones - parse device tree thermal data
*
* Initialization function that can be called by machine initialization
@@ -1087,35 +1119,3 @@ exit_free:
return -ENOMEM;
}
-
-/**
- * of_thermal_destroy_zones - remove all zones parsed and allocated resources
- *
- * Finds all zones parsed and added to the thermal framework and remove them
- * from the system, together with their resources.
- *
- */
-void of_thermal_destroy_zones(void)
-{
- struct device_node *np, *child;
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return;
- }
-
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
-
- zone = thermal_zone_get_zone_by_name(child->name);
- if (IS_ERR(zone))
- continue;
-
- thermal_zone_device_unregister(zone);
- kfree(zone->tzp);
- kfree(zone->ops);
- of_thermal_free_zone(zone->devdata);
- }
- of_node_put(np);
-}
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 45e9fcb172cc..874bc46e6c73 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -9,9 +9,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
#include <linux/thermal.h>
#include "thermal_core.h"
+#include "thermal_hwmon.h"
#define SITES_MAX 16
#define TMR_DISABLE 0x0
@@ -24,129 +27,80 @@
#define TMU_VER1 0x1
#define TMU_VER2 0x2
-/*
- * QorIQ TMU Registers
- */
-struct qoriq_tmu_site_regs {
- u32 tritsr; /* Immediate Temperature Site Register */
- u32 tratsr; /* Average Temperature Site Register */
- u8 res0[0x8];
-};
+#define REGS_TMR 0x000 /* Mode Register */
+#define TMR_DISABLE 0x0
+#define TMR_ME 0x80000000
+#define TMR_ALPF 0x0c000000
+#define TMR_MSITE_ALL GENMASK(15, 0)
-struct qoriq_tmu_regs_v1 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x14];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u32 tiscr; /* Interrupt Site Capture Register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u8 res1[0x10];
- u32 tmhtcrh; /* High Temperature Capture Register */
- u32 tmhtcrl; /* Low Temperature Capture Register */
- u8 res2[0x8];
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u8 res3[0x24];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res4[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res5[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res6[0x310];
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */
+#define TMTMIR_DEFAULT 0x0000000f
-struct qoriq_tmu_regs_v2 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmsr; /* monitor site register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x10];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u8 res1[0x8];
- u32 tiiscr; /* interrupt immediate site capture register */
- u32 tiascr; /* interrupt average site capture register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u32 res2;
- u32 tmhtcr; /* monitor high temperature capture register */
- u32 tmltcr; /* monitor low temperature capture register */
- u32 tmrtrcr; /* monitor rising temperature rate capture register */
- u32 tmftrcr; /* monitor falling temperature rate capture register */
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u32 res3;
- u32 tmltitr; /* monitor low temperature immediate threshold */
- u32 tmltatr; /* monitor low temperature average threshold register */
- u32 tmltactr; /* monitor low temperature average critical threshold */
- u32 res4;
- u32 tmrtrctr; /* monitor rising temperature rate critical threshold */
- u32 tmftrctr; /* monitor falling temperature rate critical threshold*/
- u8 res5[0x8];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res6[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res7[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res8[0x300];
- u32 teumr0;
- u32 teumr1;
- u32 teumr2;
- u32 res9;
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_V2_TMSR 0x008 /* monitor site register */
-struct qoriq_tmu_data;
+#define REGS_V2_TMTMIR 0x00c /* Temperature measurement interval Register */
+
+#define REGS_TIER 0x020 /* Interrupt Enable Register */
+#define TIER_DISABLE 0x0
+
+
+#define REGS_TTCFGR 0x080 /* Temperature Configuration Register */
+#define REGS_TSCFGR 0x084 /* Sensor Configuration Register */
+
+#define REGS_TRITSR(n) (0x100 + 16 * (n)) /* Immediate Temperature
+ * Site Register
+ */
+#define TRITSR_V BIT(31)
+#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
+ * Control Register
+ */
+#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
+ * Register n
+ */
+#define REGS_V2_TEUMR(n) (0xf00 + 4 * (n))
/*
* Thermal zone data
*/
struct qoriq_sensor {
- struct thermal_zone_device *tzd;
- struct qoriq_tmu_data *qdata;
int id;
};
struct qoriq_tmu_data {
int ver;
- struct qoriq_tmu_regs_v1 __iomem *regs;
- struct qoriq_tmu_regs_v2 __iomem *regs_v2;
+ struct regmap *regmap;
struct clk *clk;
- bool little_endian;
- struct qoriq_sensor *sensor[SITES_MAX];
+ struct qoriq_sensor sensor[SITES_MAX];
};
-static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s)
{
- if (p->little_endian)
- iowrite32(val, addr);
- else
- iowrite32be(val, addr);
-}
-
-static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
-{
- if (p->little_endian)
- return ioread32(addr);
- else
- return ioread32be(addr);
+ return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}
static int tmu_get_temp(void *p, int *temp)
{
struct qoriq_sensor *qsensor = p;
- struct qoriq_tmu_data *qdata = qsensor->qdata;
+ struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor);
u32 val;
+ /*
+ * REGS_TRITSR(id) has the following layout:
+ *
+ * bit 31: V (valid); bits 7..0: TEMP
+ *
+ * Where V bit signifies if the measurement is ready and is
+ * within sensor range. TEMP is an 8 bit value representing
+ * temperature in C.
+ */
+ if (regmap_read_poll_timeout(qdata->regmap,
+ REGS_TRITSR(qsensor->id),
+ val,
+ val & TRITSR_V,
+ USEC_PER_MSEC,
+ 10 * USEC_PER_MSEC))
+ return -ENODATA;
- val = tmu_read(qdata, &qdata->regs->site[qsensor->id].tritsr);
*temp = (val & 0xff) * 1000;
return 0;
@@ -156,84 +110,82 @@ static const struct thermal_zone_of_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
-static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
+static int qoriq_tmu_register_tmu_zone(struct device *dev,
+ struct qoriq_tmu_data *qdata)
{
- struct qoriq_tmu_data *qdata = platform_get_drvdata(pdev);
- int id, sites = 0;
+ int id;
+
+ if (qdata->ver == TMU_VER1) {
+ regmap_write(qdata->regmap, REGS_TMR,
+ TMR_MSITE_ALL | TMR_ME | TMR_ALPF);
+ } else {
+ regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL);
+ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
+ }
for (id = 0; id < SITES_MAX; id++) {
- qdata->sensor[id] = devm_kzalloc(&pdev->dev,
- sizeof(struct qoriq_sensor), GFP_KERNEL);
- if (!qdata->sensor[id])
- return -ENOMEM;
-
- qdata->sensor[id]->id = id;
- qdata->sensor[id]->qdata = qdata;
- qdata->sensor[id]->tzd = devm_thermal_zone_of_sensor_register(
- &pdev->dev, id, qdata->sensor[id], &tmu_tz_ops);
- if (IS_ERR(qdata->sensor[id]->tzd)) {
- if (PTR_ERR(qdata->sensor[id]->tzd) == -ENODEV)
+ struct thermal_zone_device *tzd;
+ struct qoriq_sensor *sensor = &qdata->sensor[id];
+ int ret;
+
+ sensor->id = id;
+
+ tzd = devm_thermal_zone_of_sensor_register(dev, id,
+ sensor,
+ &tmu_tz_ops);
+ ret = PTR_ERR_OR_ZERO(tzd);
+ if (ret) {
+ if (ret == -ENODEV)
continue;
- else
- return PTR_ERR(qdata->sensor[id]->tzd);
+
+ regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE);
+ return ret;
}
- if (qdata->ver == TMU_VER1)
- sites |= 0x1 << (15 - id);
- else
- sites |= 0x1 << id;
- }
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(dev,
+ "Failed to add hwmon sysfs attributes\n");
- /* Enable monitoring */
- if (sites != 0) {
- if (qdata->ver == TMU_VER1) {
- tmu_write(qdata, sites | TMR_ME | TMR_ALPF,
- &qdata->regs->tmr);
- } else {
- tmu_write(qdata, sites, &qdata->regs_v2->tmsr);
- tmu_write(qdata, TMR_ME | TMR_ALPF_V2,
- &qdata->regs_v2->tmr);
- }
}
return 0;
}
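Note the -ENODEV handling above: devm_thermal_zone_of_sensor_register() returns -ENODEV for sites with no matching thermal zone described in the device tree, and those sensors are simply skipped; any other error disables monitoring again before bailing out of probe.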
-static int qoriq_tmu_calibration(struct platform_device *pdev)
+static int qoriq_tmu_calibration(struct device *dev,
+ struct qoriq_tmu_data *data)
{
int i, val, len;
u32 range[4];
const u32 *calibration;
- struct device_node *np = pdev->dev.of_node;
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+ struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
if (len < 0 || len > 4) {
- dev_err(&pdev->dev, "invalid range data.\n");
+ dev_err(dev, "invalid range data.\n");
return len;
}
val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
if (val != 0) {
- dev_err(&pdev->dev, "failed to read range data.\n");
+ dev_err(dev, "failed to read range data.\n");
return val;
}
/* Init temperature range registers */
for (i = 0; i < len; i++)
- tmu_write(data, range[i], &data->regs->ttrcr[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
- dev_err(&pdev->dev, "invalid calibration data.\n");
+ dev_err(dev, "invalid calibration data.\n");
return -ENODEV;
}
for (i = 0; i < len; i += 8, calibration += 2) {
val = of_read_number(calibration, 1);
- tmu_write(data, val, &data->regs->ttcfgr);
+ regmap_write(data->regmap, REGS_TTCFGR, val);
val = of_read_number(calibration + 1, 1);
- tmu_write(data, val, &data->regs->tscfgr);
+ regmap_write(data->regmap, REGS_TSCFGR, val);
}
return 0;
@@ -242,76 +194,117 @@ static int qoriq_tmu_calibration(struct platform_device *pdev)
static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
{
/* Disable interrupt, using polling instead */
- tmu_write(data, TIER_DISABLE, &data->regs->tier);
+ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE);
/* Set update_interval */
+
if (data->ver == TMU_VER1) {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+ regmap_write(data->regmap, REGS_TMTMIR, TMTMIR_DEFAULT);
} else {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs_v2->tmtmir);
- tmu_write(data, TEUMR0_V2, &data->regs_v2->teumr0);
+ regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT);
+ regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2);
}
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
}
+static const struct regmap_range qoriq_yes_ranges[] = {
+ regmap_reg_range(REGS_TMR, REGS_TSCFGR),
+ regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)),
+ regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)),
+ regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)),
+ /* Read only registers below */
+ regmap_reg_range(REGS_TRITSR(0), REGS_TRITSR(15)),
+};
+
+static const struct regmap_access_table qoriq_wr_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges) - 1,
+};
+
+static const struct regmap_access_table qoriq_rd_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges),
+};
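The write table intentionally uses ARRAY_SIZE(qoriq_yes_ranges) - 1 so that the final entry, the read-only TRITSR block, is excluded from writes. This stays correct only while the read-only range remains the last element of qoriq_yes_ranges, which is what the comment above it guards.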
+
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
u32 ver;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
+ struct device *dev = &pdev->dev;
+ const bool little_endian = of_property_read_bool(np, "little-endian");
+ const enum regmap_endian format_endian =
+ little_endian ? REGMAP_ENDIAN_LITTLE : REGMAP_ENDIAN_BIG;
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &qoriq_rd_table,
+ .wr_table = &qoriq_wr_table,
+ .val_format_endian = format_endian,
+ .max_register = SZ_4K,
+ };
+ void __iomem *base;
+
+ data = devm_kzalloc(dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- platform_set_drvdata(pdev, data);
-
- data->little_endian = of_property_read_bool(np, "little-endian");
+ base = devm_platform_ioremap_resource(pdev, 0);
+ ret = PTR_ERR_OR_ZERO(base);
+ if (ret) {
+ dev_err(dev, "Failed to get memory region\n");
+ return ret;
+ }
- data->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->regs)) {
- dev_err(&pdev->dev, "Failed to get memory region\n");
- return PTR_ERR(data->regs);
+ data->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(data->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to init regmap (%d)\n", ret);
+ return ret;
}
- data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ data->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret) {
- dev_err(&pdev->dev, "Failed to enable clock\n");
+ dev_err(dev, "Failed to enable clock\n");
return ret;
}
/* version register offset at: 0xbf8 on both v1 and v2 */
- ver = tmu_read(data, &data->regs->ipbrr0);
+ ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read IP block version\n");
+ return ret;
+ }
data->ver = (ver >> 8) & 0xff;
- if (data->ver == TMU_VER2)
- data->regs_v2 = (void __iomem *)data->regs;
qoriq_tmu_init_device(data); /* TMU initialization */
- ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
+ ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */
if (ret < 0)
goto err;
- ret = qoriq_tmu_register_tmu_zone(pdev);
+ ret = qoriq_tmu_register_tmu_zone(dev, data);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register sensors\n");
+ dev_err(dev, "Failed to register sensors\n");
ret = -ENODEV;
goto err;
}
+ platform_set_drvdata(pdev, data);
+
return 0;
err:
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -321,24 +314,21 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
{
- u32 tmr;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+ int ret;
- /* Disable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr &= ~TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
+ ret = regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, 0);
+ if (ret)
+ return ret;
clk_disable_unprepare(data->clk);
@@ -347,7 +337,6 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
static int __maybe_unused qoriq_tmu_resume(struct device *dev)
{
- u32 tmr;
int ret;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
@@ -356,11 +345,7 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev)
return ret;
/* Enable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr |= TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
-
- return 0;
+ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 1460cf9d9f1c..72877bdc072d 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -182,9 +182,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
tsc->coef.a2);
mcelsius = FIXPT_TO_MCELSIUS(val);
- /* Make sure we are inside specifications */
- if ((mcelsius < MCELSIUS(-40)) || (mcelsius > MCELSIUS(125)))
- return -EIO;
+ /* Guaranteed operating range is -40C to 125C. */
/* Round value to device granularity setting */
*temp = rcar_gen3_thermal_round(mcelsius);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index d0873de718da..8f1aafa2044e 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -219,7 +219,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- udelay(300);
+ usleep_range(300, 400);
new = rcar_thermal_read(priv, THSSR) & CTEMP;
if (new == old) {
@@ -275,12 +275,7 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
- if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
- struct device *dev = rcar_priv_to_dev(priv);
-
- dev_err(dev, "it couldn't measure temperature correctly\n");
- return -EIO;
- }
+ /* Guaranteed operating range is -45C to 125C. */
*temp = tmp;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 343c2f5c5a25..7c1a8bccdcba 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -19,7 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/pinctrl/consumer.h>
-/**
+/*
 * If the temperature stays high over a period of time, the resulting
 * TSHUT is routed either to the CRU module, which resets the entire
 * chip, or to the PMIC via GPIO.
@@ -29,7 +29,7 @@ enum tshut_mode {
TSHUT_MODE_GPIO,
};
-/**
+/*
 * The tshut polarity of the system temperature sensors:
 * bit 8 is the tshut polarity.
 * 0: low active, 1: high active
@@ -39,7 +39,7 @@ enum tshut_polarity {
TSHUT_HIGH_ACTIVE,
};
-/**
+/*
* The system has two Temperature Sensors.
* sensor0 is for CPU, and sensor1 is for GPU.
*/
@@ -48,7 +48,7 @@ enum sensor_id {
SENSOR_GPU,
};
-/**
+/*
 * The conversion table maps adc values to temperatures.
 * ADC_DECREMENT: the adc value decreases with temperature (e.g. rk3288_code_table)
 * ADC_INCREMENT: the adc value increases with temperature (e.g. rk3368_code_table)
@@ -58,6 +58,8 @@ enum adc_sort_mode {
ADC_INCREMENT,
};
+#include "thermal_hwmon.h"
+
/**
 * Rockchip SoCs have at most two temperature sensors:
 * one for the CPU and one for the GPU.
@@ -80,13 +82,14 @@ struct chip_tsadc_table {
/**
* struct rockchip_tsadc_chip - hold the private data of tsadc chip
- * @chn_id[SOC_MAX_SENSORS]: the sensor id of chip correspond to the channel
+ * @chn_id: array of sensor ids of chip corresponding to the channel
* @chn_num: the channel number of tsadc chip
* @tshut_temp: the hardware-controlled shutdown temperature value
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
+ * @control: enable/disable method for the tsadc controller
* @get_temp: get the temperature
* @set_alarm_temp: set the high temperature interrupt
* @set_tshut_temp: set the hardware-controlled shutdown temperature
@@ -139,7 +142,7 @@ struct rockchip_thermal_sensor {
* @chip: pointer to the platform/configuration data
* @pdev: platform device of thermal
* @reset: the reset controller of tsadc
- * @sensors[SOC_MAX_SENSORS]: the thermal sensor
+ * @sensors: array of thermal sensors
 * @clk: the controller clock is divided by the external 24MHz
* @pclk: the advanced peripherals bus clock
* @grf: the general register file will be used to do static set by software
@@ -590,6 +593,9 @@ static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
/**
* rk_tsadcv2_initialize - initialize TASDC Controller.
+ * @grf: the general register file will be used to do static set by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) Set TSADC_V2_AUTO_PERIOD:
* Configure the interleave between every two accessing of
@@ -624,6 +630,9 @@ static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
/**
* rk_tsadcv3_initialize - initialize TASDC Controller.
+ * @grf: the general register file will be used to do static set by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) The tsadc control power sequence.
*
@@ -723,6 +732,8 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
/**
* rk_tsadcv3_control - the tsadc controller is enabled or disabled.
+ * @regs: the base address of tsadc controller
+ * @enable: boolean flag to enable the controller
*
* NOTE: TSADC controller works at auto mode, and some SoCs need set the
* tsadc_q_sel bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output
@@ -1206,6 +1217,7 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
/**
* Reset TSADC Controller, reset all tsadc registers.
+ * @reset: the reset controller of tsadc
*/
static void rockchip_thermal_reset_controller(struct reset_control *reset)
{
@@ -1321,8 +1333,15 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->control(thermal->regs, true);
- for (i = 0; i < thermal->chip->chn_num; i++)
+ for (i = 0; i < thermal->chip->chn_num; i++) {
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+ thermal->sensors[i].tzd->tzp->no_hwmon = false;
+ error = thermal_add_hwmon_sysfs(thermal->sensors[i].tzd);
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to register sensor %d with hwmon: %d\n",
+ i, error);
+ }
platform_set_drvdata(pdev, thermal);
@@ -1344,6 +1363,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
for (i = 0; i < thermal->chip->chn_num; i++) {
struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
+ thermal_remove_hwmon_sysfs(sensor->tzd);
rockchip_thermal_toggle_sensor(sensor, false);
}
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index fe0d2ba51392..f4eff5a41a84 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -5,7 +5,7 @@ config EXYNOS_THERMAL
depends on HAS_IOMEM
help
If you say yes here you get support for the TMU (Thermal Management
- Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
+ Unit) driver for Samsung Exynos series of SoCs. This driver initialises
the TMU, reports temperature and handles cooling action if defined.
This driver uses the Exynos core thermal APIs and TMU configuration
data from the supported SoCs.
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fb2c55123a99..fd4a17812f33 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
+ * exynos_tmu.c - Samsung Exynos TMU (Thermal Management Unit)
*
* Copyright (C) 2014 Samsung Electronics
* Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
@@ -138,7 +138,7 @@ enum soc_type {
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
- driver
+ * driver
* @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
@@ -162,8 +162,11 @@ enum soc_type {
* 0 < reference_voltage <= 31
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @tzd: pointer to thermal_zone_device structure
* @ntrip: number of supported trip points.
* @enabled: current status of TMU device
+ * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
+ * @tmu_set_trip_hyst: SoC specific method to set hysteresis (falling threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -1183,7 +1186,7 @@ static struct platform_driver exynos_tmu_driver = {
module_platform_driver(exynos_tmu_driver);
-MODULE_DESCRIPTION("EXYNOS TMU Driver");
+MODULE_DESCRIPTION("Exynos TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index cf9ddc52f30e..1cc5e6c5709e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -30,7 +30,7 @@
#define DTS_DR_OFFSET 0x1C
#define DTS_SR_OFFSET 0x20
#define DTS_ITENR_OFFSET 0x24
-#define DTS_CIFR_OFFSET 0x28
+#define DTS_ICIFR_OFFSET 0x28
/* DTS_CFGR1 register mask definitions */
#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
@@ -51,10 +51,16 @@
/* DTS_DR register mask definitions */
#define TS1_MFREQ_MASK GENMASK(15, 0)
+/* DTS_ITENR register mask definitions */
+#define ITENR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
+/* DTS_ICIFR register mask definitions */
+#define ICIFR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
/* Less significant bit position definitions */
#define TS1_T0_POS 16
-#define TS1_SMP_TIME_POS 16
#define TS1_HITTHD_POS 16
+#define TS1_LITTHD_POS 0
#define HSREF_CLK_DIV_POS 24
/* DTS_CFGR1 bit definitions */
@@ -76,58 +82,59 @@
#define ONE_MHZ 1000000
#define POLL_TIMEOUT 5000
#define STARTUP_TIME 40
-#define TS1_T0_VAL0 30
-#define TS1_T0_VAL1 130
+#define TS1_T0_VAL0 30000 /* 30 celsius */
+#define TS1_T0_VAL1 130000 /* 130 celsius */
#define NO_HW_TRIG 0
-
-/* The Thermal Framework expects millidegrees */
-#define mcelsius(temp) ((temp) * 1000)
-
-/* The Sensor expects oC degrees */
-#define celsius(temp) ((temp) / 1000)
+#define SAMPLING_TIME 15
struct stm_thermal_sensor {
struct device *dev;
struct thermal_zone_device *th_dev;
enum thermal_device_mode mode;
struct clk *clk;
- int high_temp;
- int low_temp;
- int temp_critical;
- int temp_passive;
unsigned int low_temp_enabled;
- int num_trips;
+ unsigned int high_temp_enabled;
int irq;
- unsigned int irq_enabled;
void __iomem *base;
int t0, fmt0, ramp_coeff;
};
-static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
{
- struct stm_thermal_sensor *sensor = sdata;
+ u32 value;
- disable_irq_nosync(irq);
- sensor->irq_enabled = false;
+ dev_dbg(sensor->dev, "low:%d high:%d\n", sensor->low_temp_enabled,
+ sensor->high_temp_enabled);
- return IRQ_WAKE_THREAD;
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value &= ~(LOW_THRESHOLD | HIGH_THRESHOLD);
+
+ if (sensor->low_temp_enabled)
+ value |= HIGH_THRESHOLD;
+
+ if (sensor->high_temp_enabled)
+ value |= LOW_THRESHOLD;
+
+ /* Enable interrupts */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ return 0;
}
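The apparent cross-wiring here (low_temp_enabled arming HIGH_THRESHOLD and high_temp_enabled arming LOW_THRESHOLD) is intentional: the thresholds are programmed in sampled clock periods, and the sensor frequency rises with temperature, so a low temperature limit maps to a high period count and vice versa.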
-static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+static irqreturn_t stm_thermal_irq_handler(int irq, void *sdata)
{
- u32 value;
struct stm_thermal_sensor *sensor = sdata;
- /* read IT reason in SR and clear flags */
- value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+ dev_dbg(sensor->dev, "sr:%d\n",
+ readl_relaxed(sensor->base + DTS_SR_OFFSET));
- if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
- if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ stm_enable_irq(sensor);
- thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+	/* Acknowledge all DTS irqs */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
return IRQ_HANDLED;
}
@@ -160,6 +167,8 @@ static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
writel_relaxed(value, sensor->base +
DTS_CFGR1_OFFSET);
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
return 0;
}
@@ -167,6 +176,8 @@ static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
{
u32 value;
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
/* Stop measuring */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value &= ~TS1_START;
@@ -263,60 +274,17 @@ static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
int temp, u32 *th)
{
int freqM;
- u32 sampling_time;
-
- /* Retrieve the number of periods to sample */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
/* Figure out the CLK_PTAT frequency for a given temperature */
- freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
- + sensor->fmt0;
-
- dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
- __func__, freqM);
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff) / 1000 +
+ sensor->fmt0;
/* Figure out the threshold sample number */
- *th = clk_get_rate(sensor->clk);
+ *th = clk_get_rate(sensor->clk) * SAMPLING_TIME / freqM;
if (!*th)
return -EINVAL;
- *th = *th / freqM;
-
- *th *= sampling_time;
-
- return 0;
-}
-
-static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
-{
- u32 value, th;
- int ret;
-
- value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
-
- /* Erase threshold content */
- value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
-
- /* Retrieve the sample threshold number th for a given temperature */
- ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
- if (ret)
- return ret;
-
- value |= th & TS1_LITTHD_MASK;
-
- if (sensor->low_temp_enabled) {
- /* Retrieve the sample threshold */
- ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
- &th);
- if (ret)
- return ret;
-
- value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
- }
-
- /* Write value on the Low interrupt threshold */
- writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+ dev_dbg(sensor->dev, "freqM=%d Hz, threshold=0x%x", freqM, *th);
return 0;
}
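To get a feel for the magnitudes, here is a worked example with purely hypothetical calibration values (in practice t0, fmt0 and ramp_coeff come from the device and its DT):

	/*
	 * Assume t0 = 30000 mC, fmt0 = 1000000 Hz, ramp_coeff = 2000 Hz/C,
	 * clk = 32 MHz, SAMPLING_TIME = 15. For a threshold at 85000 mC:
	 *
	 *   freqM = (85000 - 30000) * 2000 / 1000 + 1000000 = 1110000 Hz
	 *   th    = 32000000 * 15 / 1110000              ~= 432 periods
	 */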
@@ -326,77 +294,57 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor)
{
u32 value;
- /* Disable IT generation for low and high thresholds */
+ /* Disable IT generation */
value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
- sensor->base + DTS_ITENR_OFFSET);
-
- dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
-
- return 0;
-}
-
-/* Enable temperature interrupt */
-static int stm_enable_irq(struct stm_thermal_sensor *sensor)
-{
- u32 value;
-
- /*
- * Code below enables High temperature threshold using a low threshold
- * sampling value
- */
-
- /* Make sure LOW_THRESHOLD IT is clear before enabling */
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for low threshold */
- value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- value |= LOW_THRESHOLD;
-
- /* Enable the low temperature threshold if needed */
- if (sensor->low_temp_enabled) {
- /* Make sure HIGH_THRESHOLD IT is clear before enabling */
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for high threshold */
- value |= HIGH_THRESHOLD;
- }
-
- /* Enable thresholds */
+ value &= ~ITENR_MASK;
writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
- dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
-
return 0;
}
-static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+static int stm_thermal_set_trips(void *data, int low, int high)
{
+ struct stm_thermal_sensor *sensor = data;
+ u32 itr1, th;
int ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
+ dev_dbg(sensor->dev, "set trips %d <--> %d\n", low, high);
- ret = stm_sensor_power_off(sensor);
- if (ret)
- return ret;
+ /* Erase threshold content */
+ itr1 = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+ itr1 &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
- ret = stm_disable_irq(sensor);
- if (ret)
- return ret;
+ /*
+ * Disable low-temp if "low" is too small. As per thermal framework
+ * API, we use -INT_MAX rather than INT_MIN.
+ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- return ret;
+ if (low > -INT_MAX) {
+ sensor->low_temp_enabled = 1;
+		/* add 0.5 degrees of hysteresis to absorb measurement error */
+ ret = stm_thermal_calculate_threshold(sensor, low - 500, &th);
+ if (ret)
+ return ret;
- ret = stm_enable_irq(sensor);
- if (ret)
- return ret;
+ itr1 |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ } else {
+ sensor->low_temp_enabled = 0;
+ }
- ret = stm_sensor_power_on(sensor);
- if (ret)
- return ret;
+ /* Disable high-temp if "high" is too big. */
+ if (high < INT_MAX) {
+ sensor->high_temp_enabled = 1;
+ ret = stm_thermal_calculate_threshold(sensor, high, &th);
+ if (ret)
+ return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ itr1 |= (TS1_LITTHD_MASK & (th << TS1_LITTHD_POS));
+ } else {
+ sensor->high_temp_enabled = 0;
+ }
+
+	/* Write new threshold values */
+ writel_relaxed(itr1, sensor->base + DTS_ITR1_OFFSET);
return 0;
}
@@ -405,76 +353,26 @@ static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
static int stm_thermal_get_temp(void *data, int *temp)
{
struct stm_thermal_sensor *sensor = data;
- u32 sampling_time;
+ u32 periods;
int freqM, ret;
if (sensor->mode != THERMAL_DEVICE_ENABLED)
return -EAGAIN;
- /* Retrieve the number of samples */
- ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
- (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
- POLL_TIMEOUT);
-
+ /* Retrieve the number of periods sampled */
+ ret = readl_relaxed_poll_timeout(sensor->base + DTS_DR_OFFSET, periods,
+ (periods & TS1_MFREQ_MASK),
+ STARTUP_TIME, POLL_TIMEOUT);
if (ret)
return ret;
- if (!freqM)
- return -ENODATA;
-
- /* Retrieve the number of periods sampled */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
-
- /* Figure out the number of samples per period */
- freqM /= sampling_time;
-
/* Figure out the CLK_PTAT frequency */
- freqM = clk_get_rate(sensor->clk) / freqM;
+ freqM = (clk_get_rate(sensor->clk) * SAMPLING_TIME) / periods;
if (!freqM)
return -EINVAL;
- dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
-
 /* Figure out the temperature in millidegrees celsius */
- *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
- sensor->ramp_coeff));
-
- dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
- __func__, *temp);
-
- /* Update thresholds */
- if (sensor->num_trips > 1) {
- /* Update alarm threshold value to next higher trip point */
- if (sensor->high_temp == sensor->temp_passive &&
- celsius(*temp) >= sensor->temp_passive) {
- sensor->high_temp = sensor->temp_critical;
- sensor->low_temp = sensor->temp_passive;
- sensor->low_temp_enabled = true;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- if (sensor->high_temp == sensor->temp_critical &&
- celsius(*temp) < sensor->temp_passive) {
- sensor->high_temp = sensor->temp_passive;
- sensor->low_temp_enabled = false;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- /*
- * Re-enable alarm IRQ if temperature below critical
- * temperature
- */
- if (!sensor->irq_enabled &&
- (celsius(*temp) < sensor->temp_critical)) {
- sensor->irq_enabled = true;
- enable_irq(sensor->irq);
- }
- }
+ *temp = (freqM - sensor->fmt0) * 1000 / sensor->ramp_coeff + sensor->t0;
return 0;
}
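Running the same hypothetical numbers backwards through the read path: with clk = 32 MHz and SAMPLING_TIME = 15, a DR reading of periods = 432 gives freqM = 32000000 * 15 / 432 ~= 1111111 Hz, hence *temp = (1111111 - 1000000) * 1000 / 2000 + 30000 ~= 85555 mC, consistent with the threshold example above to within rounding.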
@@ -493,8 +391,8 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
}
ret = devm_request_threaded_irq(dev, sensor->irq,
- stm_thermal_alarm_irq,
- stm_thermal_alarm_irq_thread,
+ NULL,
+ stm_thermal_irq_handler,
IRQF_ONESHOT,
dev->driver->name, sensor);
if (ret) {
@@ -503,8 +401,6 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
return ret;
}
- sensor->irq_enabled = true;
-
dev_dbg(dev, "%s: thermal IRQ registered", __func__);
return 0;
@@ -514,6 +410,8 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
{
int ret;
+ stm_disable_irq(sensor);
+
ret = stm_sensor_power_off(sensor);
if (ret)
return ret;
@@ -526,7 +424,6 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
{
int ret;
- struct device *dev = sensor->dev;
ret = clk_prepare_enable(sensor->clk);
if (ret)
@@ -540,26 +437,8 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
if (ret)
goto thermal_unprepare;
- /* Set threshold(s) for IRQ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_enable_irq(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_sensor_power_on(sensor);
- if (ret) {
- dev_err(dev, "%s: failed to power on sensor\n", __func__);
- goto irq_disable;
- }
-
return 0;
-irq_disable:
- stm_disable_irq(sensor);
-
thermal_unprepare:
clk_disable_unprepare(sensor->clk);
@@ -576,8 +455,6 @@ static int stm_thermal_suspend(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
-
return 0;
}
@@ -590,7 +467,12 @@ static int stm_thermal_resume(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+ stm_enable_irq(sensor);
return 0;
}
@@ -600,6 +482,7 @@ SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_of_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
+ .set_trips = stm_thermal_set_trips,
};
static const struct of_device_id stm_thermal_of_match[] = {
@@ -612,9 +495,8 @@ static int stm_thermal_probe(struct platform_device *pdev)
{
struct stm_thermal_sensor *sensor;
struct resource *res;
- const struct thermal_trip *trip;
void __iomem *base;
- int ret, i;
+ int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "%s: device tree node not found\n",
@@ -645,10 +527,23 @@ static int stm_thermal_probe(struct platform_device *pdev)
return PTR_ERR(sensor->clk);
}
- /* Register IRQ into GIC */
- ret = stm_register_irq(sensor);
- if (ret)
+ stm_disable_irq(sensor);
+
+ /* Clear irq flags */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error preprare sensor: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error power on sensor: %d\n", ret);
return ret;
+ }
sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
sensor,
@@ -661,53 +556,12 @@ static int stm_thermal_probe(struct platform_device *pdev)
return ret;
}
- if (!sensor->th_dev->ops->get_crit_temp) {
- /* Critical point must be provided */
- ret = -EINVAL;
- goto err_tz;
- }
-
- ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
- &sensor->temp_critical);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to read critical_temp: %d\n", ret);
+ /* Register IRQ into GIC */
+ ret = stm_register_irq(sensor);
+ if (ret)
goto err_tz;
- }
-
- sensor->temp_critical = celsius(sensor->temp_critical);
-
- /* Set thresholds for IRQ */
- sensor->high_temp = sensor->temp_critical;
-
- trip = of_thermal_get_trip_points(sensor->th_dev);
- sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
-
- /* Find out passive temperature if it exists */
- for (i = (sensor->num_trips - 1); i >= 0; i--) {
- if (trip[i].type == THERMAL_TRIP_PASSIVE) {
- sensor->temp_passive = celsius(trip[i].temperature);
- /* Update high temperature threshold */
- sensor->high_temp = sensor->temp_passive;
- }
- }
- /*
- * Ensure low_temp_enabled flag is disabled.
- * By disabling low_temp_enabled, low threshold IT will not be
- * configured neither enabled because it is not needed as high
- * threshold is set on the lowest temperature trip point after
- * probe.
- */
- sensor->low_temp_enabled = false;
-
- /* Configure and enable HW sensor */
- ret = stm_thermal_prepare(sensor);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to enable sensor: %d\n", ret);
- goto err_tz;
- }
+ stm_enable_irq(sensor);
/*
* Thermal_zone doesn't enable hwmon as default,
@@ -718,8 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
if (ret)
goto err_tz;
- sensor->mode = THERMAL_DEVICE_ENABLED;
-
dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
__func__);
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 6e051cbd824f..2ae7198d3067 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -174,8 +174,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* step_wise_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses the trend of the thermal zone to throttle.
* If the thermal zone is 'heating up' this throttles all the cooling
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
new file mode 100644
index 000000000000..74d73be16496
--- /dev/null
+++ b/drivers/thermal/sun8i_thermal.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thermal sensor driver for Allwinner SOC
+ * Copyright (C) 2019 Yangtao Li
+ *
+ * Based on the work of Icenowy Zheng <icenowy@aosc.io>
+ * Based on the work of Ondrej Jirman <megous@megous.com>
+ * Based on the work of Josef Gajdusek <atx@atx.name>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "thermal_hwmon.h"
+
+#define MAX_SENSOR_NUM 4
+
+#define FT_TEMP_MASK GENMASK(11, 0)
+#define TEMP_CALIB_MASK GENMASK(11, 0)
+#define CALIBRATE_DEFAULT 0x800
+
+#define SUN8I_THS_CTRL0 0x00
+#define SUN8I_THS_CTRL2 0x40
+#define SUN8I_THS_IC 0x44
+#define SUN8I_THS_IS 0x48
+#define SUN8I_THS_MFC 0x70
+#define SUN8I_THS_TEMP_CALIB 0x74
+#define SUN8I_THS_TEMP_DATA 0x80
+
+#define SUN50I_THS_CTRL0 0x00
+#define SUN50I_H6_THS_ENABLE 0x04
+#define SUN50I_H6_THS_PC 0x08
+#define SUN50I_H6_THS_DIC 0x10
+#define SUN50I_H6_THS_DIS 0x20
+#define SUN50I_H6_THS_MFC 0x30
+#define SUN50I_H6_THS_TEMP_CALIB 0xa0
+#define SUN50I_H6_THS_TEMP_DATA 0xc0
+
+#define SUN8I_THS_CTRL0_T_ACQ0(x) (GENMASK(15, 0) & (x))
+#define SUN8I_THS_CTRL2_T_ACQ1(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN8I_THS_DATA_IRQ_STS(x) BIT(x + 8)
+
+#define SUN50I_THS_CTRL0_T_ACQ(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN50I_THS_FILTER_EN BIT(2)
+#define SUN50I_THS_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
+#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
+#define SUN50I_H6_THS_DATA_IRQ_STS(x) BIT(x)
+
+/* millidegree celsius */
+
+struct tsensor {
+ struct ths_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int id;
+};
+
+struct ths_thermal_chip {
+ bool has_mod_clk;
+ bool has_bus_clk_reset;
+ int sensor_num;
+ int offset;
+ int scale;
+ int ft_deviation;
+ int temp_data_base;
+ int (*calibrate)(struct ths_device *tmdev,
+ u16 *caldata, int callen);
+ int (*init)(struct ths_device *tmdev);
+ int (*irq_ack)(struct ths_device *tmdev);
+ int (*calc_temp)(struct ths_device *tmdev,
+ int id, int reg);
+};
+
+struct ths_device {
+ const struct ths_thermal_chip *chip;
+ struct device *dev;
+ struct regmap *regmap;
+ struct reset_control *reset;
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct tsensor sensor[MAX_SENSOR_NUM];
+};
+
+/* Temp Unit: millidegree Celsius */
+static int sun8i_ths_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ return tmdev->chip->offset - (reg * tmdev->chip->scale / 10);
+}
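As a worked example using the sun50i_a64_ths parameters defined further down (offset = 260890, scale = 1170): a raw register value of 2048 yields 260890 - 2048 * 1170 / 10 = 260890 - 239616 = 21274, i.e. roughly 21.3 degrees Celsius.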
+
+static int sun50i_h5_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ if (reg >= 0x500)
+ return -1191 * reg / 10 + 223000;
+ else if (!id)
+ return -1452 * reg / 10 + 259000;
+ else
+ return -1590 * reg / 10 + 276000;
+}
+
+static int sun8i_ths_get_temp(void *data, int *temp)
+{
+ struct tsensor *s = data;
+ struct ths_device *tmdev = s->tmdev;
+ int val = 0;
+
+ regmap_read(tmdev->regmap, tmdev->chip->temp_data_base +
+ 0x4 * s->id, &val);
+
+ /* ths have no data yet */
+ if (!val)
+ return -EAGAIN;
+
+ *temp = tmdev->chip->calc_temp(tmdev, s->id, val);
+ /*
+	 * According to the original SDK, a few platforms add a fixed
+	 * offset to the value after the temperature calculation. We
+	 * cannot simply fold that offset into the conversion formula
+	 * above, because the same formula is also used when the sensor
+	 * is calibrated; doing so would make the correct calibration
+	 * formula hard to derive.
+ */
+ *temp += tmdev->chip->ft_deviation;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ths_ops = {
+ .get_temp = sun8i_ths_get_temp,
+};
+
+static const struct regmap_config config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .max_register = 0xfc,
+};
+
+static int sun8i_h3_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN8I_THS_IS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN8I_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN8I_THS_IS,
+ SUN8I_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static int sun50i_h6_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN50I_H6_THS_DIS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN50I_H6_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIS,
+ SUN50I_H6_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t sun8i_irq_thread(int irq, void *data)
+{
+ struct ths_device *tmdev = data;
+ int i, state;
+
+ state = tmdev->chip->irq_ack(tmdev);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & BIT(i))
+ thermal_zone_device_update(tmdev->sensor[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun8i_h3_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ int i;
+
+ if (!caldata[0] || callen < 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int offset = (i % 2) << 4;
+
+ regmap_update_bits(tmdev->regmap,
+ SUN8I_THS_TEMP_CALIB + (4 * (i >> 1)),
+ 0xfff << offset,
+ caldata[i] << offset);
+ }
+
+ return 0;
+}
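Each 32-bit calibration register packs two 12-bit values, so the loop above places sensor i's data at bit 0 or bit 16 of SUN8I_THS_TEMP_CALIB + 4 * (i / 2): sensors 0 and 1 share the first register, sensors 2 and 3 the second.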
+
+static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ struct device *dev = tmdev->dev;
+ int i, ft_temp;
+
+ if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ /*
+ * efuse layout:
+ *
+ * 0 11 16 32
+ * +-------+-------+-------+
+ * |temp| |sensor0|sensor1|
+ * +-------+-------+-------+
+ *
+ * The calibration data on the H6 is the ambient temperature and
+ * sensor values that are filled during the factory test stage.
+ *
+	 * The stored FT temperature is in units of 0.1 degree Celsius.
+	 *
+	 * We need to calculate the delta between the measured and the
+	 * calculated register values; this delta becomes the calibration offset.
+ */
+ ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int sensor_reg = caldata[i + 1];
+ int cdata, offset;
+ int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
+
+ /*
+ * Calibration data is CALIBRATE_DEFAULT - (calculated
+ * temperature from sensor reading at factory temperature
+ * minus actual factory temperature) * 14.88 (scale from
+ * temperature to register values)
+ */
+ cdata = CALIBRATE_DEFAULT -
+ ((sensor_temp - ft_temp) * 10 / tmdev->chip->scale);
+ if (cdata & ~TEMP_CALIB_MASK) {
+ /*
+ * Calibration value more than 12-bit, but calibration
+ * register is 12-bit. In this case, ths hardware can
+ * still work without calibration, although the data
+ * won't be so accurate.
+ */
+ dev_warn(dev, "sensor%d is not calibrated.\n", i);
+ continue;
+ }
+
+ offset = (i % 2) * 16;
+ regmap_update_bits(tmdev->regmap,
+ SUN50I_H6_THS_TEMP_CALIB + (i / 2 * 4),
+ 0xfff << offset,
+ cdata << offset);
+ }
+
+ return 0;
+}
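A worked example with hypothetical efuse contents, using the H6 parameters defined further down (offset = 187744, scale = 672):

	/*
	 * caldata[0] = 250            -> ft_temp = 250 * 100 = 25000 mC
	 * caldata[1] = 2347 (sensor0) -> sensor_temp
	 *         = 187744 - 2347 * 672 / 10 = 30026 mC
	 * cdata = 0x800 - (30026 - 25000) * 10 / 672
	 *       = 2048 - 74 = 1974 (0x7b6), which fits in 12 bits
	 */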
+
+static int sun8i_ths_calibrate(struct ths_device *tmdev)
+{
+ struct nvmem_cell *calcell;
+ struct device *dev = tmdev->dev;
+ u16 *caldata;
+ size_t callen;
+ int ret = 0;
+
+ calcell = devm_nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(calcell)) {
+ if (PTR_ERR(calcell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /*
+ * Even if the external calibration data stored in sid is
+ * not accessible, the THS hardware can still work, although
+ * the data won't be so accurate.
+ *
+		 * The default value of the calibration register is 0x800 for
+		 * every sensor, and the calibration value is usually 0x7xx
+		 * or 0x8xx, so it never strays far from the default.
+		 *
+		 * So we do not return an error here if the calibration data
+		 * is not available, except when the probe needs deferring.
+ */
+ goto out;
+ }
+
+ caldata = nvmem_cell_read(calcell, &callen);
+ if (IS_ERR(caldata)) {
+ ret = PTR_ERR(caldata);
+ goto out;
+ }
+
+ tmdev->chip->calibrate(tmdev, caldata, callen);
+
+ kfree(caldata);
+out:
+ return ret;
+}
+
+static int sun8i_ths_resource_init(struct ths_device *tmdev)
+{
+ struct device *dev = tmdev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tmdev->regmap = devm_regmap_init_mmio(dev, base, &config);
+ if (IS_ERR(tmdev->regmap))
+ return PTR_ERR(tmdev->regmap);
+
+ if (tmdev->chip->has_bus_clk_reset) {
+ tmdev->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tmdev->reset))
+ return PTR_ERR(tmdev->reset);
+
+ tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(tmdev->bus_clk))
+ return PTR_ERR(tmdev->bus_clk);
+ }
+
+ if (tmdev->chip->has_mod_clk) {
+ tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(tmdev->mod_clk))
+ return PTR_ERR(tmdev->mod_clk);
+ }
+
+ ret = reset_control_deassert(tmdev->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tmdev->bus_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_set_rate(tmdev->mod_clk, 24000000);
+ if (ret)
+ goto bus_disable;
+
+ ret = clk_prepare_enable(tmdev->mod_clk);
+ if (ret)
+ goto bus_disable;
+
+ ret = sun8i_ths_calibrate(tmdev);
+ if (ret)
+ goto mod_disable;
+
+ return 0;
+
+mod_disable:
+ clk_disable_unprepare(tmdev->mod_clk);
+bus_disable:
+ clk_disable_unprepare(tmdev->bus_clk);
+assert_reset:
+ reset_control_assert(tmdev->reset);
+
+ return ret;
+}
+
+static int sun8i_h3_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN8I_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ val = GENMASK(7 + tmdev->chip->sensor_num, 8);
+ regmap_write(tmdev->regmap, SUN8I_THS_IC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365) | val);
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL0,
+ SUN8I_THS_CTRL0_T_ACQ0(479));
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL2,
+ SUN8I_THS_CTRL2_T_ACQ1(479) | val);
+
+ return 0;
+}
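Checking the period arithmetic in the comment above: 0.25 s * 24000000 Hz = 6000000, divided by 4096 gives about 1464.8, divided by 4 filter samples gives about 366.2, and subtracting 1 leaves about 365.2, which truncates to the programmed value of 365.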
+
+/*
+ * Without this undocumented value, the returned temperatures would
+ * be higher than the real ones by about 20C.
+ */
+#define SUN50I_H6_CTRL0_UNK 0x0000002f
+
+static int sun50i_h6_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
+ SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_PC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365));
+ /* enable sensor */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_ENABLE, val);
+ /* thermal data interrupt enable */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIC, val);
+
+ return 0;
+}
+
+static int sun8i_ths_register(struct ths_device *tmdev)
+{
+ int i;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(tmdev->dev,
+ i,
+ &tmdev->sensor[i],
+ &ths_ops);
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ return PTR_ERR(tmdev->sensor[i].tzd);
+
+ if (devm_thermal_add_hwmon_sysfs(tmdev->sensor[i].tzd))
+ dev_warn(tmdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+
+ return 0;
+}
+
+static int sun8i_ths_probe(struct platform_device *pdev)
+{
+ struct ths_device *tmdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->chip = of_device_get_match_data(&pdev->dev);
+ if (!tmdev->chip)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, tmdev);
+
+ ret = sun8i_ths_resource_init(tmdev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = tmdev->chip->init(tmdev);
+ if (ret)
+ return ret;
+
+ ret = sun8i_ths_register(tmdev);
+ if (ret)
+ return ret;
+
+ /*
+	 * To avoid entering the interrupt handler before the thermal
+	 * zones are registered, we defer requesting the interrupt to
+	 * the end of probe.
+ */
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ sun8i_irq_thread,
+ IRQF_ONESHOT, "ths", tmdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sun8i_ths_remove(struct platform_device *pdev)
+{
+ struct ths_device *tmdev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(tmdev->mod_clk);
+ clk_disable_unprepare(tmdev->bus_clk);
+ reset_control_assert(tmdev->reset);
+
+ return 0;
+}
+
+static const struct ths_thermal_chip sun8i_a83t_ths = {
+ .sensor_num = 3,
+ .scale = 705,
+ .offset = 191668,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_h3_ths = {
+ .sensor_num = 1,
+ .scale = 1211,
+ .offset = 217000,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_r40_ths = {
+ .sensor_num = 2,
+ .offset = 251086,
+ .scale = 1130,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_a64_ths = {
+ .sensor_num = 3,
+ .offset = 260890,
+ .scale = 1170,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h5_ths = {
+ .sensor_num = 2,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun50i_h5_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h6_ths = {
+ .sensor_num = 2,
+ .has_bus_clk_reset = true,
+ .ft_deviation = 7000,
+ .offset = 187744,
+ .scale = 672,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct of_device_id of_ths_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
+ { .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
+ { .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths },
+ { .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths },
+ { .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
+ { .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_ths_match);
+
+static struct platform_driver ths_driver = {
+ .probe = sun8i_ths_probe,
+ .remove = sun8i_ths_remove,
+ .driver = {
+ .name = "sun8i-thermal",
+ .of_match_table = of_ths_match,
+ },
+};
+module_platform_driver(ths_driver);
+
+MODULE_DESCRIPTION("Thermal sensor driver for Allwinner SOC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 5acaad3a594f..66e0639da4bf 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -360,7 +360,7 @@ static struct soctherm_oc_irq_chip_data soc_irq_cdata;
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
- * @v: the value to write
+ * @value: the value to write
* @reg: the register offset
*
 * Writes @value to @reg. No return value.
@@ -435,6 +435,7 @@ static int tegra_thermctl_get_temp(void *data, int *out_temp)
/**
* enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: struct device * of the SOC_THERM instance
* @trip_temp: the trip temperature to check
*
* Checks and enforces the permitted temperature range that SOC_THERM
@@ -747,6 +748,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
/**
* tegra_soctherm_set_hwtrips() - set HW trip point from DT data
* @dev: struct device * of the SOC_THERM instance
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @tz: struct thermal_zone_device *
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
* "THROTTLE" trip points , using "thermtrips", "critical" or "hot"
@@ -931,6 +934,7 @@ static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
/**
* soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @ts: pointer to a struct tegra_soctherm
* @alarm: The soctherm throttle id
* @enable: Flag indicating enable the soctherm over-current
* interrupt or disable it
@@ -1156,7 +1160,7 @@ static void soctherm_oc_irq_enable(struct irq_data *data)
/**
* soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
- * @irq_data: The interrupt request information
+ * @data: The interrupt request information
*
* Clears the interrupt request enable bit of the overcurrent
* interrupt request chip data.
@@ -1206,6 +1210,7 @@ static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
/**
* soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
* @d: Interrupt request domain
+ * @ctrlr: Controller device tree node
* @intspec: Array of u32s from DTs "interrupt" property
* @intsize: Number of values inside the intspec array
* @out_hwirq: HW IRQ value associated with this interrupt
@@ -1681,6 +1686,7 @@ err:
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
+ * @pdev: Pointer to platform_device struct
*/
static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
{
@@ -1751,6 +1757,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
/**
* throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
+ * @ts: pointer to a struct tegra_soctherm
* @level: describing the level LOW/MED/HIGH of throttling
*
* It's necessary to set up the CPU-local CCROC NV_THERM instance with
@@ -1798,6 +1805,7 @@ static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
/**
* throttlectl_cpu_level_select() - program CPU pulse skipper config
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1841,6 +1849,7 @@ static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
/**
* throttlectl_cpu_mn() - program CPU pulse skipper configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1874,6 +1883,7 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
/**
* throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* This function programs soctherm's interface to GK20a NV_THERM to select
@@ -1918,6 +1928,7 @@ static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
/**
* soctherm_throttle_program() - programs pulse skippers' configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of the throttle event id.
*
* Pulse skippers are used to throttle clock frequencies.
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index ae5743c9a894..73665c3ccfe0 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -76,13 +76,17 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
struct gadc_thermal_info *gti)
{
struct device_node *np = dev->of_node;
+ enum iio_chan_type chan_type;
int ntable;
int ret;
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
if (ntable <= 0) {
- dev_notice(dev, "no lookup table, assuming DAC channel returns milliCelcius\n");
+ ret = iio_get_channel_type(gti->channel, &chan_type);
+ if (ret || chan_type != IIO_TEMP)
+ dev_notice(dev,
+ "no lookup table, assuming DAC channel returns milliCelcius\n");
return 0;
}
@@ -124,13 +128,6 @@ static int gadc_thermal_probe(struct platform_device *pdev)
if (!gti)
return -ENOMEM;
- ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
- if (ret < 0)
- return ret;
-
- gti->dev = &pdev->dev;
- platform_set_drvdata(pdev, gti);
-
gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
@@ -139,6 +136,13 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
}
+ ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ if (ret < 0)
+ return ret;
+
+ gti->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gti);
+
gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
&gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 207b0cda70da..a9bf00e91d64 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -92,14 +92,12 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
-void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
-static inline void of_thermal_destroy_zones(void) { }
static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index dd5d8ee37928..c8d2620f2e42 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -248,3 +248,31 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
kfree(hwmon);
}
EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
+
+static void devm_thermal_hwmon_release(struct device *dev, void *res)
+{
+ thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
+}
+
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_zone_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = thermal_add_hwmon_sysfs(tz);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = tz;
+ devres_add(&tz->device, ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_add_hwmon_sysfs);
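For reference, a minimal (hypothetical) caller looks like the sketch below; because the devres entry is attached to the zone's own device, the hwmon sysfs group is torn down automatically when the zone is unregistered:

	tzd = devm_thermal_zone_of_sensor_register(dev, 0, sensor, &ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	/* non-fatal: the zone still works without the hwmon view */
	if (devm_thermal_add_hwmon_sysfs(tzd))
		dev_warn(dev, "failed to add hwmon sysfs attributes\n");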
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index a160b9d62dd0..1a9d65f6a6a8 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_THERMAL_HWMON
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
#else
static inline int
@@ -25,6 +26,12 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
}
+static inline int
+devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
static inline void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 962873fd9242..293cffd9c8ad 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -17,8 +17,8 @@
/**
* notify_user_space - Notifies user space about thermal events
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* This function notifies the user space through UEvents.
*/
diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c
index 7c8a82c8e1e9..8e3a2d3c2f9a 100644
--- a/drivers/thermal/zx2967_thermal.c
+++ b/drivers/thermal/zx2967_thermal.c
@@ -45,6 +45,7 @@
* @clk_topcrm: topcrm clk structure
* @clk_apb: apb clk structure
* @regs: pointer to base address of the thermal sensor
+ * @dev: struct device pointer
*/
struct zx2967_thermal_priv {
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index fd9adca898ff..1eb757e8df3b 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig THUNDERBOLT
- tristate "Thunderbolt support"
+menuconfig USB4
+ tristate "Unified support for USB4 and Thunderbolt"
depends on PCI
depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
@@ -9,9 +9,10 @@ menuconfig THUNDERBOLT
select CRYPTO_HASH
select NVMEM
help
- Thunderbolt Controller driver. This driver is required if you
- want to hotplug Thunderbolt devices on Apple hardware or on PCs
- with Intel Falcon Ridge or newer.
+ USB4 and Thunderbolt driver. USB4 is the public specification
+ based on the Thunderbolt 3 protocol. This driver is required if
+ you want to hotplug Thunderbolt and USB4 compliant devices on
+ Apple hardware or on PCs with Intel Falcon Ridge or newer.
To compile this driver as a module, choose M here. The module will be
called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 001187c577bf..eae28dd45250 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index fdd77bb4628d..19db6cdc5b70 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
return ret;
}
-static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+/**
+ * tb_switch_find_cap() - Find switch capability
+ * @sw: Switch to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = sw->config.first_cap_offset;
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d97813e80e5f..f77ceae5c7d7 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -708,19 +708,26 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
- * tb_cfg_error() - send error packet
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: %true to ack a hot unplug event, %false to ack a hot plug event
*
- * Return: Returns 0 on success or an error code on failure.
+ * Call this in response to a hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
*/
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error)
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
struct cfg_error_pkg pkg = {
.header = tb_cfg_make_header(route),
.port = port,
- .error = error,
+ .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+ .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+ : TB_CFG_ERROR_PG_HOT_PLUG,
};
- tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2f1a1e111110..97cb03b38953 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -123,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error);
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 8dd7de0cc826..921d164b3f35 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -131,12 +131,51 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
}
/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+ struct tb_cap_plug_events cap;
+ int res;
+
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+ return -ENODEV;
+ }
+ res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+ sizeof(cap) / 4);
+ if (res)
+ return res;
+
+ if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+ tb_sw_warn(sw, "no NVM\n");
+ return -ENODEV;
+ }
+
+ if (cap.drom_offset > 0xffff) {
+ tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+ cap.drom_offset);
+ return -ENXIO;
+ }
+ *offset = cap.drom_offset;
+ return 0;
+}
+
+/**
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
size_t count)
{
+ u16 drom_offset;
int i, res;
+
+ res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+ if (res)
+ return res;
+
+ offset += drom_offset;
+
res = tb_eeprom_active(sw, true);
if (res)
return res;
@@ -239,36 +278,6 @@ struct tb_drom_entry_port {
/**
- * tb_eeprom_get_drom_offset - get drom offset within eeprom
- */
-static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
-{
- struct tb_cap_plug_events cap;
- int res;
- if (!sw->cap_plug_events) {
- tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
- return -ENOSYS;
- }
- res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
- sizeof(cap) / 4);
- if (res)
- return res;
-
- if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
- tb_sw_warn(sw, "no NVM\n");
- return -ENOSYS;
- }
-
- if (cap.drom_offset > 0xffff) {
- tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
- cap.drom_offset);
- return -ENXIO;
- }
- *offset = cap.drom_offset;
- return 0;
-}
-
-/**
* tb_drom_read_uid_only - read uid directly from drom
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
@@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
u8 data[9];
- u16 drom_offset;
u8 crc;
- int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- if (drom_offset == 0)
- return -ENODEV;
+ int res;
/* read uid */
- res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+ res = tb_eeprom_read_n(sw, 0, data, 9);
if (res)
return res;
@@ -484,12 +487,42 @@ err_free:
return ret;
}
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+ int ret;
+
+ ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+ if (ret) {
+ kfree(sw->drom);
+ sw->drom = NULL;
+ }
+
+ return ret;
+}
+
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+ size_t count)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_drom_read(sw, offset, val, count);
+ return tb_eeprom_read_n(sw, offset, val, count);
+}
+
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
{
- u16 drom_offset;
u16 size;
u32 crc;
struct tb_drom_header *header;
@@ -510,18 +543,26 @@ int tb_drom_read(struct tb_switch *sw)
goto parse;
/*
- * The root switch contains only a dummy drom (header only,
- * no entries). Hardcode the configuration here.
+ * USB4 hosts may support reading DROM through router
+ * operations.
*/
- tb_drom_read_uid_only(sw, &sw->uid);
+ if (tb_switch_is_usb4(sw)) {
+ usb4_switch_read_uid(sw, &sw->uid);
+ if (!usb4_copy_host_drom(sw, &size))
+ goto parse;
+ } else {
+ /*
+ * The root switch contains only a dummy drom
+ * (header only, no entries). Hardcode the
+ * configuration here.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ }
+
return 0;
}
- res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+ res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
if (res)
return res;
size &= 0x3ff;
@@ -535,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw)
sw->drom = kzalloc(size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
- res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
if (res)
goto err;
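/*
 * Editorial sketch (not part of the patch): tb_eeprom_read_n() now
 * adds the DROM offset internally and tb_drom_read_n() dispatches to
 * USB4 router operations when available, so callers address the DROM
 * from offset 0. Reading the 10-bit size field at offset 14, as
 * tb_drom_read() does above:
 */
u16 size;
int res = tb_drom_read_n(sw, 14, (u8 *)&size, 2);

if (!res)
	size &= 0x3ff;	/* only the low 10 bits hold the size */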
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 641b21b54460..1be491ecbb45 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1271,6 +1271,9 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+
{ 0,}
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b7b973949f8e..5d276ee9b38e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -74,4 +74,6 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
+
#endif
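/*
 * Editorial sketch (not part of the patch): class code 0x0c0340
 * decodes as base class 0x0c (serial bus controller), subclass 0x03
 * (USB controller) and programming interface 0x40 (USB4 host
 * interface). Matching the full 24-bit class with mask ~0 therefore
 * binds the driver to any USB4 compliant host, regardless of vendor
 * and device ID:
 */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
	{ 0, }
};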
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ca86a8e09c77..ad5479f21174 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -163,10 +163,12 @@ static int nvm_validate_and_write(struct tb_switch *sw)
image_size -= hdr_size;
}
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_write(sw, 0, buf, image_size);
return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
-static int nvm_authenticate_host(struct tb_switch *sw)
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
int ret = 0;
@@ -206,7 +208,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
return ret;
}
-static int nvm_authenticate_device(struct tb_switch *sw)
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
int ret, retries = 10;
@@ -251,6 +253,78 @@ static int nvm_authenticate_device(struct tb_switch *sw)
return -ETIMEDOUT;
}
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ /*
+ * During host router NVM upgrade we should not allow the root port
+ * to go into D3cold because some root ports cannot trigger PME
+ * themselves. To be on the safe side keep the root port in D0 during
+ * the whole upgrade process.
+ */
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * USB4 devices must support NVM operations but it is
+ * optional for hosts. Therefore we query the NVM sector
+ * size here and, if it is supported, assume NVM
+ * operations are implemented.
+ */
+ return usb4_switch_nvm_sector_size(sw) > 0;
+ }
+
+ /* Thunderbolt 2 and 3 devices support NVM through DMA port */
+ return !!sw->dma_port;
+}
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+ if (sw->no_nvm_upgrade)
+ return false;
+ return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+ void *buf, size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_authenticate(sw);
+
+ if (!tb_route(sw)) {
+ nvm_authenticate_start_dma_port(sw);
+ ret = nvm_authenticate_host_dma_port(sw);
+ } else {
+ ret = nvm_authenticate_device_dma_port(sw);
+ }
+
+ return ret;
+}
+
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -264,7 +338,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ ret = nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -341,8 +415,20 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
u32 val;
int ret;
- if (!sw->dma_port)
+ if (!nvm_readable(sw))
+ return 0;
+
+ /*
+ * The NVM format of non-Intel hardware is not known, so for
+ * now we restrict NVM upgrade to Intel hardware. We may
+ * relax this in the future when we learn other NVM formats.
+ */
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
+ dev_info(&sw->dev,
+ "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+ sw->config.vendor_id);
return 0;
+ }
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -358,8 +444,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
if (!sw->safe_mode) {
u32 nvm_size, hdr_size;
- ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -367,8 +452,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
- ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -620,6 +704,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
}
/**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
+ * downstream router accessible to the CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+ if (tb_switch_is_icm(port->sw))
+ return 0;
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_unlock(port);
+ return 0;
+}
+
+/**
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -650,6 +752,10 @@ static int tb_init_port(struct tb_port *port)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+ if (cap > 0)
+ port->cap_usb4 = cap;
} else if (port->port != 0) {
cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
if (cap > 0)
@@ -936,12 +1042,47 @@ bool tb_port_is_enabled(struct tb_port *port)
case TB_TYPE_DP_HDMI_OUT:
return tb_dp_port_is_enabled(port);
+ case TB_TYPE_USB3_UP:
+ case TB_TYPE_USB3_DOWN:
+ return tb_usb3_port_is_enabled(port);
+
default:
return false;
}
}
/**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1))
+ return false;
+
+ return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+ : ADP_USB3_CS_0_V;
+
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
* @port: PCIe port to check
*/
@@ -1088,20 +1229,38 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
/* switch utility functions */
-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
+{
+ switch (sw->generation) {
+ case 1:
+ return "Thunderbolt 1";
+ case 2:
+ return "Thunderbolt 2";
+ case 3:
+ return "Thunderbolt 3";
+ case 4:
+ return "USB4";
+ default:
+ return "Unknown";
+ }
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
- tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
- sw->vendor_id, sw->device_id, sw->revision,
- sw->thunderbolt_version);
- tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
+ const struct tb_regs_switch_header *regs = &sw->config;
+
+ tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+ regs->revision, regs->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
tb_dbg(tb, " Config:\n");
tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
- sw->upstream_port_number, sw->depth,
- (((u64) sw->route_hi) << 32) | sw->route_lo,
- sw->enabled, sw->plug_events_delay);
+ regs->upstream_port_number, regs->depth,
+ (((u64) regs->route_hi) << 32) | regs->route_lo,
+ regs->enabled, regs->plug_events_delay);
tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
- sw->__unknown1, sw->__unknown4);
+ regs->__unknown1, regs->__unknown4);
}
/**
@@ -1148,6 +1307,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res)
return res;
+ /* Plug events are always enabled in USB4 */
+ if (tb_switch_is_usb4(sw))
+ return 0;
+
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
@@ -1359,30 +1522,6 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
-static void nvm_authenticate_start(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- /*
- * During host router NVM upgrade we should not allow root port to
- * go into D3cold because some root ports cannot trigger PME
- * itself. To be on the safe side keep the root port in D0 during
- * the whole upgrade process.
- */
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_get_noresume(&root_port->dev);
-}
-
-static void nvm_authenticate_complete(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_put(&root_port->dev);
-}
-
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1431,17 +1570,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
goto exit_unlock;
sw->nvm->authenticating = true;
-
- if (!tb_route(sw)) {
- /*
- * Keep root port from suspending as long as the
- * NVM upgrade process is running.
- */
- nvm_authenticate_start(sw);
- ret = nvm_authenticate_host(sw);
- } else {
- ret = nvm_authenticate_device(sw);
- }
+ ret = nvm_authenticate(sw);
}
exit_unlock:
@@ -1556,11 +1685,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
- if (sw->dma_port && !sw->no_nvm_upgrade)
+ if (nvm_upgradeable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_version.attr) {
- if (sw->dma_port)
+ if (nvm_readable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_boot.attr) {
@@ -1672,6 +1801,9 @@ static int tb_switch_get_generation(struct tb_switch *sw)
return 3;
default:
+ if (tb_switch_is_usb4(sw))
+ return 4;
+
/*
* For unknown switches assume generation to be 1 to be
* on the safe side.
@@ -1682,6 +1814,19 @@ static int tb_switch_get_generation(struct tb_switch *sw)
}
}
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+ int max_depth;
+
+ if (tb_switch_is_usb4(sw) ||
+ (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+ max_depth = USB4_SWITCH_MAX_DEPTH;
+ else
+ max_depth = TB_SWITCH_MAX_DEPTH;
+
+ return depth > max_depth;
+}
+
/**
* tb_switch_alloc() - allocate a switch
* @tb: Pointer to the owning domain
@@ -1703,10 +1848,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
int upstream_port;
int i, ret, depth;
- /* Make sure we do not exceed maximum topology limit */
+ /* Unlock the downstream port so we can access the switch below */
+ if (route) {
+ struct tb_switch *parent_sw = tb_to_switch(parent);
+ struct tb_port *down;
+
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
+ }
+
depth = tb_route_length(route);
- if (depth > TB_SWITCH_MAX_DEPTH)
- return ERR_PTR(-EADDRNOTAVAIL);
upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
@@ -1721,8 +1872,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (ret)
goto err_free_sw_ports;
+ sw->generation = tb_switch_get_generation(sw);
+
tb_dbg(tb, "current switch config:\n");
- tb_dump_switch(tb, &sw->config);
+ tb_dump_switch(tb, sw);
/* configure switch */
sw->config.upstream_port_number = upstream_port;
@@ -1731,6 +1884,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
+ /* Make sure we do not exceed maximum topology limit */
+ if (tb_switch_exceeds_max_depth(sw, depth)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_free_sw_ports;
+ }
+
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
@@ -1745,14 +1904,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->ports[i].port = i;
}
- sw->generation = tb_switch_get_generation(sw);
-
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
- if (ret < 0) {
- tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
- goto err_free_sw_ports;
- }
- sw->cap_plug_events = ret;
+ if (ret > 0)
+ sw->cap_plug_events = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
@@ -1823,7 +1977,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
*
* Call this function before the switch is added to the system. It will
* upload configuration to the switch and makes it available for the
- * connection manager to use.
+ * connection manager to use. Can be called for the switch again after
+ * resume from low power states to re-initialize it.
*
* Return: %0 in case of success and negative errno in case of failure
*/
@@ -1834,21 +1989,50 @@ int tb_switch_configure(struct tb_switch *sw)
int ret;
route = tb_route(sw);
- tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), sw->config.upstream_port_number);
- if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
- tb_sw_warn(sw, "unknown switch vendor id %#x\n",
- sw->config.vendor_id);
+ tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+ sw->config.enabled ? "restoring " : "initializing", route,
+ tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
- /* upload configuration */
- ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
- if (ret)
- return ret;
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * For USB4 devices, we need to program the CM version
+ * accordingly so that it knows to expose all the
+ * additional capabilities.
+ */
+ sw->config.cmuv = USB4_VERSION_1_0;
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 4);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_setup(sw);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_configure_link(sw);
+ } else {
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+ tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+ sw->config.vendor_id);
- ret = tb_lc_configure_link(sw);
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+ return -ENODEV;
+ }
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 3);
+ if (ret)
+ return ret;
+
+ ret = tb_lc_configure_link(sw);
+ }
if (ret)
return ret;
@@ -1857,18 +2041,32 @@ int tb_switch_configure(struct tb_switch *sw)
static int tb_switch_set_uuid(struct tb_switch *sw)
{
+ bool uid = false;
u32 uuid[4];
int ret;
if (sw->uuid)
return 0;
- /*
- * The newer controllers include fused UUID as part of link
- * controller specific registers
- */
- ret = tb_lc_read_uuid(sw, uuid);
- if (ret) {
+ if (tb_switch_is_usb4(sw)) {
+ ret = usb4_switch_read_uid(sw, &sw->uid);
+ if (ret)
+ return ret;
+ uid = true;
+ } else {
+ /*
+ * The newer controllers include fused UUID as part of
+ * link controller specific registers
+ */
+ ret = tb_lc_read_uuid(sw, uuid);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ uid = true;
+ }
+ }
+
+ if (uid) {
/*
* ICM generates UUID based on UID and fills the upper
* two words with ones. This is not strictly following
@@ -1935,7 +2133,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
nvm_get_auth_status(sw, &status);
if (status) {
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
return 0;
}
@@ -1950,7 +2148,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
/* Now we can allow root port to suspend again */
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
@@ -2004,6 +2202,8 @@ static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
if (!up->dual_link_port || !up->dual_link_port->remote)
return false;
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_lane_bonding_possible(sw);
return tb_lc_lane_bonding_possible(sw);
}
@@ -2175,6 +2375,10 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_update_link_attributes(sw);
if (ret)
return ret;
+
+ ret = tb_switch_tmu_init(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
@@ -2240,7 +2444,11 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
- tb_lc_unconfigure_link(sw);
+
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_unconfigure_link(sw);
+ else
+ tb_lc_unconfigure_link(sw);
tb_switch_nvm_remove(sw);
@@ -2298,7 +2506,10 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
}
- err = tb_drom_read_uid_only(sw, &uid);
+ if (tb_switch_is_usb4(sw))
+ err = usb4_switch_read_uid(sw, &uid);
+ else
+ err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
return err;
@@ -2311,16 +2522,7 @@ int tb_switch_resume(struct tb_switch *sw)
}
}
- /* upload configuration */
- err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
- if (err)
- return err;
-
- err = tb_lc_configure_link(sw);
- if (err)
- return err;
-
- err = tb_plug_events_active(sw, true);
+ err = tb_switch_configure(sw);
if (err)
return err;
@@ -2336,8 +2538,14 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
- } else if (tb_port_has_remote(port)) {
- if (tb_switch_resume(port->remote->sw)) {
+ } else if (tb_port_has_remote(port) || port->xdomain) {
+ /*
+ * Always unlock the port so the downstream
+ * switch/domain is accessible.
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+ if (port->remote && tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
@@ -2361,7 +2569,10 @@ void tb_switch_suspend(struct tb_switch *sw)
tb_switch_suspend(port->remote->sw);
}
- tb_lc_set_sleep(sw);
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_set_sleep(sw);
+ else
+ tb_lc_set_sleep(sw);
}
/**
@@ -2374,6 +2585,8 @@ void tb_switch_suspend(struct tb_switch *sw)
*/
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_query_dp_resource(sw, in);
return tb_lc_dp_sink_query(sw, in);
}
@@ -2388,6 +2601,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_alloc_dp_resource(sw, in);
return tb_lc_dp_sink_alloc(sw, in);
}
@@ -2401,10 +2616,16 @@ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
- if (tb_lc_dp_sink_dealloc(sw, in)) {
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ ret = usb4_switch_dealloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_dealloc(sw, in);
+
+ if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
- }
}
struct tb_sw_lookup {
@@ -2517,6 +2738,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
return NULL;
}
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
+ return NULL;
+}
+
void tb_switch_exit(void)
{
ida_destroy(&nvm_ida);
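/*
 * Editorial sketch (not part of the patch): nvm_read() and
 * nvm_authenticate() above concentrate the generation split in one
 * place, so common code stays branch-free. For example, reading the
 * NVM version works identically for USB4 routers and Thunderbolt 2/3
 * DMA port devices; "example_nvm_version" is a hypothetical helper:
 */
static int example_nvm_version(struct tb_switch *sw, u32 *val)
{
	/* Dispatches to usb4_switch_nvm_read() or dma_port_flash_read() */
	return nvm_read(sw, NVM_VERSION, val, sizeof(*val));
}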
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ea8727f769d6..107cd232f486 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -111,6 +111,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
tunnel = tb_tunnel_discover_pci(tb, port);
break;
+ case TB_TYPE_USB3_DOWN:
+ tunnel = tb_tunnel_discover_usb3(tb, port);
+ break;
+
default:
break;
}
@@ -158,6 +162,137 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+ int ret;
+
+ /* If it is already enabled in the correct mode, don't touch it */
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_disable(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_post_time(sw);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->config.type != type)
+ continue;
+ if (!port->cap_adap)
+ continue;
+ if (tb_port_is_enabled(port))
+ continue;
+ return port;
+ }
+ return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ struct tb_port *down;
+
+ down = usb4_switch_map_usb3_down(sw, port);
+ if (down) {
+ if (WARN_ON(!tb_port_is_usb3_down(down)))
+ goto out;
+ if (WARN_ON(tb_usb3_port_is_enabled(down)))
+ goto out;
+
+ return down;
+ }
+
+out:
+ return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+ if (!up)
+ return 0;
+
+ /*
+ * Look up an available down port. Since we are chaining, it should
+ * be found right above this switch.
+ */
+ port = tb_port_at(tb_route(sw), parent);
+ down = tb_find_usb3_down(parent, port);
+ if (!down)
+ return 0;
+
+ if (tb_route(parent)) {
+ struct tb_port *parent_up;
+ /*
+ * Check first that the parent switch has its upstream USB3
+ * port enabled. Otherwise the chain is not complete and
+ * there is no point setting up a new tunnel.
+ */
+ parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+ if (!parent_up || !tb_port_is_enabled(parent_up))
+ return 0;
+ }
+
+ tunnel = tb_tunnel_alloc_usb3(tb, up, down);
+ if (!tunnel)
+ return -ENOMEM;
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "USB3 tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_route(sw)) {
+ ret = tb_tunnel_usb3(sw->tb, sw);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+ ret = tb_create_usb3_tunnels(port->remote->sw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void tb_scan_port(struct tb_port *port);
/**
@@ -257,6 +392,18 @@ static void tb_scan_port(struct tb_port *port)
if (tb_switch_lane_bonding_enable(sw))
tb_sw_warn(sw, "failed to enable lane bonding\n");
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to enable TMU\n");
+
+ /*
+ * Create USB 3.x tunnels only when the switch is plugged into the
+ * domain. This is because we also scan the domain during discovery
+ * and want to discover existing USB 3.x tunnels before we create
+ * any new ones.
+ */
+ if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+ tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
tb_scan_switch(sw);
}
@@ -338,57 +485,18 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
}
}
-/**
- * tb_find_port() - return the first port of @type on @sw or NULL
- * @sw: Switch to find the port from
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (port->config.type == type)
- return port;
- }
-
- return NULL;
-}
-
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (tb_is_upstream_port(port))
- continue;
- if (port->config.type != type)
- continue;
- if (port->cap_adap)
- continue;
- if (tb_port_is_enabled(port))
- continue;
- return port;
- }
- return NULL;
-}
-
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
+ struct tb_port *down = NULL;
+
/*
* To keep plugging devices consistently in the same PCIe
- * hierarchy, do mapping here for root switch downstream PCIe
- * ports.
+ * hierarchy, do mapping here for switch downstream PCIe ports.
*/
- if (!tb_route(sw)) {
+ if (tb_switch_is_usb4(sw)) {
+ down = usb4_switch_map_pcie_down(sw, port);
+ } else if (!tb_route(sw)) {
int phy_port = tb_phy_port_from_link(port->port);
int index;
@@ -409,12 +517,17 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
/* Validate the hard-coding */
if (WARN_ON(index > sw->config.max_port_number))
goto out;
- if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+
+ down = &sw->ports[index];
+ }
+
+ if (down) {
+ if (WARN_ON(!tb_port_is_pcie_down(down)))
goto out;
- if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+ if (WARN_ON(tb_pci_port_is_enabled(down)))
goto out;
- return &sw->ports[index];
+ return down;
}
out:
@@ -586,7 +699,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
struct tb_switch *parent_sw;
struct tb_tunnel *tunnel;
- up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+ up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
if (!up)
return 0;
@@ -624,7 +737,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
- nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
@@ -719,6 +832,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
+ tb_switch_tmu_disable(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
@@ -786,8 +900,7 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
route = tb_cfg_get_route(&pkg->header);
- if (tb_cfg_error(tb->ctl, route, pkg->port,
- TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+ if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
}
@@ -866,10 +979,17 @@ static int tb_start(struct tb *tb)
return ret;
}
+ /* Enable TMU if it is off */
+ tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /*
+ * If the boot firmware did not create USB 3.x tunnels, create them
+ * now for the whole topology.
+ */
+ tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
@@ -897,6 +1017,9 @@ static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ec851f20c571..2eb2bcd3cca3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -44,6 +44,39 @@ struct tb_switch_nvm {
#define TB_SWITCH_KEY_SIZE 32
#define TB_SWITCH_MAX_DEPTH 6
+#define USB4_SWITCH_MAX_DEPTH 5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ * transmission of the Delay Request TSNOS
+ * (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ * transmission of the Delay Request TSNOS on
+ * a Link
+ */
+enum tb_switch_tmu_rate {
+ TB_SWITCH_TMU_RATE_OFF = 0,
+ TB_SWITCH_TMU_RATE_HIFI = 16,
+ TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate relative to the upstream switch. In case of
+ * the root switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ * relative to the upstream switch. Not applicable to the root switch.
+ */
+struct tb_switch_tmu {
+ int cap;
+ bool has_ucap;
+ enum tb_switch_tmu_rate rate;
+ bool unidirectional;
+};
/**
* struct tb_switch - a thunderbolt switch
@@ -54,6 +87,7 @@ struct tb_switch_nvm {
* mailbox this will hold the pointer to that (%NULL
* otherwise). If set it also means the switch has
* upgradeable NVM.
+ * @tmu: The switch TMU configuration
* @tb: Pointer to the domain the switch belongs to
* @uid: Unique ID of the switch
* @uuid: UUID of the switch (or %NULL if not supported)
@@ -92,6 +126,7 @@ struct tb_switch {
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb_dma_port *dma_port;
+ struct tb_switch_tmu tmu;
struct tb *tb;
u64 uid;
uuid_t *uuid;
@@ -128,7 +163,9 @@ struct tb_switch {
* @remote: Remote port (%NULL if not connected)
* @xdomain: Remote host (%NULL if not connected)
* @cap_phy: Offset, zero if not found
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
* @bonded: true if the port is bonded (two lanes combined as one)
@@ -145,7 +182,9 @@ struct tb_port {
struct tb_port *remote;
struct tb_xdomain *xdomain;
int cap_phy;
+ int cap_tmu;
int cap_adap;
+ int cap_usb4;
u8 port;
bool disabled;
bool bonded;
@@ -393,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port)
return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -533,6 +582,8 @@ void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -636,6 +687,17 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
}
/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if @sw is a USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+ return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
* tb_switch_is_icm() - Is the switch handled by ICM firmware
* @sw: Switch to check
*
@@ -656,10 +718,22 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+ return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+ !sw->tmu.unidirectional;
+}
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -668,9 +742,13 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
bool tb_port_is_enabled(struct tb_port *port);
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);
@@ -734,4 +812,27 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_configure_link(struct tb_switch *sw);
+void usb4_switch_unconfigure_link(struct tb_switch *sw);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
#endif
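/*
 * Editorial sketch (not part of the patch): a connection manager
 * brings the TMU of a newly scanned switch into the supported mode
 * with the API declared above, mirroring tb_enable_tmu() in tb.c
 * (error handling omitted for brevity):
 */
if (!tb_switch_tmu_is_enabled(sw)) {
	tb_switch_tmu_disable(sw);	/* force a known state first */
	tb_switch_tmu_post_time(sw);	/* sync local time with grandmaster */
	tb_switch_tmu_enable(sw);	/* bi-directional, HiFi */
}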
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 3705057723b6..fc208c567953 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -67,9 +67,13 @@ struct cfg_error_pkg {
u32 zero1:4;
u32 port:6;
u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:16;
+ u32 zero3:14;
+ u32 pg:2;
} __packed;
+#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
+
/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
struct tb_cfg_header header;
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 7ee45b73c7f7..c29c5075525a 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -26,6 +26,7 @@
#define TB_MAX_CONFIG_RW_LENGTH 60
enum tb_switch_cap {
+ TB_SWITCH_CAP_TMU = 0x03,
TB_SWITCH_CAP_VSE = 0x05,
};
@@ -41,6 +42,7 @@ enum tb_port_cap {
TB_PORT_CAP_TIME1 = 0x03,
TB_PORT_CAP_ADAP = 0x04,
TB_PORT_CAP_VSE = 0x05,
+ TB_PORT_CAP_USB4 = 0x06,
};
enum tb_port_state {
@@ -164,10 +166,52 @@ struct tb_regs_switch_header {
* milliseconds. Writing 0x00 is interpreted
* as 255ms.
*/
- u32 __unknown4:16;
+ u32 cmuv:8;
+ u32 __unknown4:8;
u32 thunderbolt_version:8;
} __packed;
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0 0x20
+
+#define ROUTER_CS_1 0x01
+#define ROUTER_CS_4 0x04
+#define ROUTER_CS_5 0x05
+#define ROUTER_CS_5_SLP BIT(0)
+#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_PTO BIT(24)
+#define ROUTER_CS_5_UTO BIT(25)
+#define ROUTER_CS_5_HCO BIT(26)
+#define ROUTER_CS_5_CV BIT(31)
+#define ROUTER_CS_6 0x06
+#define ROUTER_CS_6_SLPR BIT(0)
+#define ROUTER_CS_6_TNS BIT(1)
+#define ROUTER_CS_6_HCI BIT(18)
+#define ROUTER_CS_6_CR BIT(25)
+#define ROUTER_CS_7 0x07
+#define ROUTER_CS_9 0x09
+#define ROUTER_CS_25 0x19
+#define ROUTER_CS_26 0x1a
+#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT 24
+#define ROUTER_CS_26_ONS BIT(30)
+#define ROUTER_CS_26_OV BIT(31)
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0 0x00
+#define TMU_RTR_CS_0_TD BIT(27)
+#define TMU_RTR_CS_0_UCAP BIT(30)
+#define TMU_RTR_CS_1 0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16
+#define TMU_RTR_CS_2 0x02
+#define TMU_RTR_CS_3 0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
+#define TMU_RTR_CS_22 0x16
+#define TMU_RTR_CS_24 0x18
+
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
TB_TYPE_PORT = 0x000001,
@@ -178,7 +222,8 @@ enum tb_port_type {
TB_TYPE_DP_HDMI_OUT = 0x0e0102,
TB_TYPE_PCIE_DOWN = 0x100101,
TB_TYPE_PCIE_UP = 0x100102,
- /* TB_TYPE_USB = 0x200000, lower order bits are not known */
+ TB_TYPE_USB3_DOWN = 0x200101,
+ TB_TYPE_USB3_UP = 0x200102,
};
/* Present on every port in TB_CF_PORT at address zero. */
@@ -216,10 +261,15 @@ struct tb_regs_port_header {
#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_4_LCK BIT(31)
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+/* TMU adapter registers */
+#define TMU_ADP_CS_3 0x03
+#define TMU_ADP_CS_3_UDM BIT(29)
+
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
@@ -237,6 +287,12 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+/* USB4 port registers */
+#define PORT_CS_18 0x12
+#define PORT_CS_18_BE BIT(8)
+#define PORT_CS_19 0x13
+#define PORT_CS_19_PC BIT(3)
+
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
@@ -277,6 +333,11 @@ struct tb_regs_port_header {
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
+/* USB adapter registers */
+#define ADP_USB3_CS_0 0x00
+#define ADP_USB3_CS_0_V BIT(30)
+#define ADP_USB3_CS_0_PE BIT(31)
+
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
/* DWORD 0 */
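/*
 * Editorial sketch (not part of the patch): the new USB4 definitions
 * follow the mask/shift convention used throughout this file. For
 * example, decoding a router operation result from ROUTER_CS_26,
 * assuming "val" holds the register contents:
 */
if (val & ROUTER_CS_26_ONS)	/* operation not supported */
	return -EOPNOTSUPP;

status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;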
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 000000000000..039c42a06000
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+ bool root_switch = !tb_route(sw);
+
+ switch (sw->tmu.rate) {
+ case TB_SWITCH_TMU_RATE_OFF:
+ return "off";
+
+ case TB_SWITCH_TMU_RATE_HIFI:
+ /* Root switch does not have upstream directionality */
+ if (root_switch)
+ return "HiFi";
+ if (sw->tmu.unidirectional)
+ return "uni-directional, HiFi";
+ return "bi-directional, HiFi";
+
+ case TB_SWITCH_TMU_RATE_NORMAL:
+ if (root_switch)
+ return "normal";
+ return "uni-directional, normal";
+
+ default:
+ return "unknown";
+ }
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+ return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+ val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+ u32 value)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+ if (ret)
+ return ret;
+
+ data &= ~mask;
+ data |= value;
+
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+ bool unidirectional)
+{
+ u32 val;
+
+ if (!port->sw->tmu.has_ucap)
+ return 0;
+
+ val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+ return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_3, 1);
+ if (ret)
+ return false;
+
+ return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ if (set)
+ val |= TMU_RTR_CS_0_TD;
+ else
+ val &= ~TMU_RTR_CS_0_TD;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions to
+ * make sure the internal structures are filled in correctly. Does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+ if (ret > 0)
+ sw->tmu.cap = ret;
+
+ tb_switch_for_each_port(sw, port) {
+ int cap;
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+ if (cap > 0)
+ port->cap_tmu = cap;
+ }
+
+ ret = tb_switch_tmu_rate_read(sw);
+ if (ret < 0)
+ return ret;
+
+ sw->tmu.rate = ret;
+
+ sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+ if (sw->tmu.has_ucap) {
+ tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+ if (tb_route(sw)) {
+ struct tb_port *up = tb_upstream_port(sw);
+
+ sw->tmu.unidirectional =
+ tb_port_tmu_is_unidirectional(up);
+ }
+ } else {
+ sw->tmu.unidirectional = false;
+ }
+
+ tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates switch local time using time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+ unsigned int post_local_time_offset, post_time_offset;
+ struct tb_switch *root_switch = sw->tb->root_switch;
+ u64 hi, mid, lo, local_time, post_time;
+ int i, ret, retries = 100;
+ u32 gm_local_time[3];
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Need to be able to read the grandmaster time */
+ if (!root_switch->tmu.cap)
+ return 0;
+
+ ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+ root_switch->tmu.cap + TMU_RTR_CS_1,
+ ARRAY_SIZE(gm_local_time));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+ tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+ gm_local_time[i]);
+
+ /* Convert to nanoseconds (drop fractional part) */
+ hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+ mid = gm_local_time[1];
+ lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+ TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+ local_time = hi << 48 | mid << 16 | lo;
+
+ /* Tell the switch that time sync is disrupted for a while */
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+ post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+ /*
+ * Write the Grandmaster time to the Post Local Time registers
+ * of the new switch.
+ */
+ ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+ post_local_time_offset, 2);
+ if (ret)
+ goto out;
+
+ /*
+ * Have the new switch update its local time (by writing 1 to
+ * the post_time registers) and wait for it to complete (the
+ * post_time register reads back 0). This means the time has
+ * converged properly.
+ */
+ post_time = 1;
+
+ ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+ if (ret)
+ goto out;
+
+ do {
+ usleep_range(5, 10);
+ ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+ post_time_offset, 2);
+ if (ret)
+ goto out;
+ } while (--retries && post_time);
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+ tb_switch_tmu_set_time_disruption(sw, false);
+ return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off TMU of @sw if it is enabled. If not enabled, does nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Already disabled? */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+ return 0;
+
+ if (sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+ tb_sw_dbg(sw, "TMU: disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
+ * all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ /* Change mode to bi-directional */
+ if (tb_route(sw) && sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
+ } else {
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+ }
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+ return tb_switch_tmu_set_time_disruption(sw, false);
+}
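A hedged usage sketch of how the TMU helpers above compose when a new router appears. The example_tmu_bringup() name is hypothetical and the in-tree connection manager may sequence these calls differently; error handling is abbreviated.

static int example_tmu_bringup(struct tb_switch *sw)
{
	int ret;

	/* Fill in TMU capabilities and the current rate/mode */
	ret = tb_switch_tmu_init(sw);
	if (ret)
		return ret;

	/* Seed the router's local time from the grandmaster (root switch) */
	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	/* Switch to bi-directional HiFi mode so all tunneling works */
	return tb_switch_tmu_enable(sw);
}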
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 0d3463c4e24a..dbe90bcf4ad4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -19,6 +19,12 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID 8
+
+#define TB_USB3_PATH_DOWN 0
+#define TB_USB3_PATH_UP 1
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -31,7 +37,7 @@
#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1
-static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
@@ -243,6 +249,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+ /* Titan Ridge DP adapters need the same treatment as USB4 */
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
int timeout = 10;
@@ -250,8 +262,7 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
int ret;
/* Both ends need to support this */
- if (!tb_switch_is_titan_ridge(in->sw) ||
- !tb_switch_is_titan_ridge(out->sw))
+ if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
return 0;
ret = tb_port_read(out, &val, TB_CFG_PORT,
@@ -531,7 +542,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
u32 val, rate = 0, lanes = 0;
int ret;
- if (tb_switch_is_titan_ridge(sw)) {
+ if (tb_dp_is_usb4(sw)) {
int timeout = 10;
/*
@@ -843,6 +854,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
}
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_usb3_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_usb3_up(tunnel->dst_port))
+ return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 3;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_usb3_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+ &tunnel->dst_port, "USB3 Up");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_usb3_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+ "USB3 Down");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on an USB3 adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ *
+ * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
+ * @TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+
+ path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+ "USB3 Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+ path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+ "USB3 Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+
+ return tunnel;
+}
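A hedged sketch of how this allocator pairs with tb_tunnel_activate() (declared in tunnel.h below). The example_usb3_tunnel() function is hypothetical; real connection-manager logic also has to keep the tunnel around for later teardown.

static int example_usb3_tunnel(struct tb *tb, struct tb_switch *parent,
			       struct tb_port *usb4_port, struct tb_switch *sw)
{
	struct tb_port *down, *up;
	struct tb_tunnel *tunnel;
	int ret;

	/* Map the parent USB4 port to its USB3 downstream adapter */
	down = usb4_switch_map_usb3_down(parent, usb4_port);
	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!down || !up)
		return -ENODEV;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_free(tunnel);
		return ret;
	}
	return 0;
}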
+
/**
* tb_tunnel_free() - free a tunnel
* @tunnel: Tunnel to be freed
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index ba888da005f5..3f5ba93225e7 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -15,6 +15,7 @@ enum tb_tunnel_type {
TB_TUNNEL_PCI,
TB_TUNNEL_DP,
TB_TUNNEL_DMA,
+ TB_TUNNEL_USB3,
};
/**
@@ -57,6 +58,9 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
@@ -82,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_DMA;
}
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_USB3;
+}
+
#endif
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 000000000000..b341fc60c4ba
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "tb.h"
+
+#define USB4_DATA_DWORDS 16
+#define USB4_DATA_RETRIES 3
+
+enum usb4_switch_op {
+ USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+ USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+ USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+ USB4_SWITCH_OP_NVM_WRITE = 0x20,
+ USB4_SWITCH_OP_NVM_AUTH = 0x21,
+ USB4_SWITCH_OP_NVM_READ = 0x22,
+ USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+ USB4_SWITCH_OP_DROM_READ = 0x24,
+ USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT 2
+#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT 24
+
+#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT 2
+#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT 15
+
+#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
+
+typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+ return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+ return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
+ void *buf, size_t size, read_block_fn read_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
+ unsigned int dwaddress, dwords;
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ dwaddress = address / 4;
+ dwords = ALIGN(nbytes, 4) / 4;
+
+ ret = read_block(sw, dwaddress, data, dwords);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ memcpy(buf, data + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
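For clarity, a worked example of the chunking and retry logic above (illustrative only; an aligned address is used so the bounce-buffer offset is zero):

/*
 * Example: usb4_switch_do_read_data(sw, 0x1000, buf, 10, read_block)
 *
 *   offset    = 0x1000 & 3         = 0
 *   dwaddress = 0x1000 / 4         = 0x400
 *   nbytes    = min(10, 16 * 4)    = 10
 *   dwords    = ALIGN(10, 4) / 4   = 3
 *
 * read_block() fetches 3 dwords starting at dword address 0x400 and
 * the first 10 bytes of the bounce buffer are copied to @buf. A block
 * that times out is retried up to USB4_DATA_RETRIES times before the
 * whole read fails with -EIO.
 */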
+
+static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
+ const void *buf, size_t size, write_block_fn write_next_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ memcpy(data + offset, buf, nbytes);
+
+ ret = write_next_block(sw, data, nbytes / 4);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+ u32 val;
+ int ret;
+
+ val = opcode | ROUTER_CS_26_OV;
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ if (val & ROUTER_CS_26_ONS)
+ return -EOPNOTSUPP;
+
+ *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+ return 0;
+}
+
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB and PCIe tunneling if it can be
+ * enabled (e.g. the parent switch also supports them). If USB tunneling
+ * is not available for some reason (for example, there is a Thunderbolt 3
+ * switch upstream), the internal xHCI controller is enabled
+ * instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+ struct tb_switch *parent;
+ bool tbt3, xhci;
+ u32 val = 0;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+ if (ret)
+ return ret;
+
+ xhci = val & ROUTER_CS_6_HCI;
+ tbt3 = !(val & ROUTER_CS_6_TNS);
+
+ tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+ tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ parent = tb_switch_parent(sw);
+
+ if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+ val |= ROUTER_CS_5_UTO;
+ xhci = false;
+ }
+
+ /* Only enable PCIe tunneling if the parent router supports it */
+ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+ val |= ROUTER_CS_5_PTO;
+ /*
+ * xHCI can be enabled if PCIe tunneling is supported
+ * and the parent does not have any USB3 downstream
+ * adapters (so we cannot do USB 3.x tunneling).
+ */
+ if (xhci)
+ val |= ROUTER_CS_5_HCO;
+ }
+
+ /* TBT3 supported by the CM */
+ val |= ROUTER_CS_5_C3S;
+ /* Tunneling configuration is ready now */
+ val |= ROUTER_CS_5_CV;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+ return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf,
+ size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+ metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+ USB4_DROM_ADDRESS_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
+ *
+ * Uses USB4 router operations to read router DROM. For devices this
+ * should always work but for hosts it may return %-EOPNOTSUPP in which
+ * case the host router does not have DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_drom_read_block);
+}
+
+static int usb4_set_port_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PC;
+ else
+ val &= ~PORT_CS_19_PC;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * @sw: USB4 router
+ *
+ * Sets the upstream USB4 link to be configured for power management
+ * purposes.
+ */
+int usb4_switch_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ return usb4_set_port_configured(up, true);
+}
+
+/**
+ * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
+ * @sw: USB4 router
+ *
+ * Reverse of usb4_switch_configure_link().
+ */
+void usb4_switch_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (sw->is_unplugged || !tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ usb4_set_port_configured(up, false);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int ret;
+ u32 val;
+
+ up = tb_upstream_port(sw);
+ ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Enables wakes and sets sleep bit for the router. Returns when the
+ * router sleep ready bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ /* Set sleep bit and wait for sleep ready to be asserted */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val |= ROUTER_CS_5_SLP;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+ u32 metadata;
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+ ret = usb4_switch_op_read_metadata(sw, &metadata);
+ if (ret)
+ return ret;
+
+ return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf, size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+ USB4_NVM_READ_LENGTH_MASK;
+ metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+ USB4_NVM_READ_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_nvm_read_block);
+}
+
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+ unsigned int address)
+{
+ u32 metadata, dwaddress;
+ u8 status = 0;
+ int ret;
+
+ dwaddress = address / 4;
+ metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+ USB4_NVM_SET_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
+ const void *buf, size_t dwords)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_data(sw, buf, dwords);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size)
+{
+ int ret;
+
+ ret = usb4_switch_nvm_set_offset(sw, address);
+ if (ret)
+ return ret;
+
+ return usb4_switch_do_write_data(sw, address, buf, size,
+ usb4_switch_nvm_write_next_block);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case 0x0:
+ tb_sw_dbg(sw, "NVM authentication successful\n");
+ return 0;
+ case 0x1:
+ return -EINVAL;
+ case 0x2:
+ return -EAGAIN;
+ case 0x3:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
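A hedged sketch of a complete NVM upgrade built from the helpers above. The example_nvm_upgrade() name is hypothetical; real callers also validate the image before writing and re-read the NVM afterwards.

static int example_nvm_upgrade(struct tb_switch *sw, const void *image,
			       size_t size)
{
	int ret;

	/* Probe for NVM support; returns -EOPNOTSUPP when absent */
	ret = usb4_switch_nvm_sector_size(sw);
	if (ret < 0)
		return ret;

	ret = usb4_switch_nvm_write(sw, 0, image, size);
	if (ret)
		return ret;

	/* On success the router power cycles into the new image */
	return usb4_switch_nvm_authenticate(sw);
}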
+
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return false;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+ /*
+ * If DP resource allocation is not supported assume it is
+ * always available.
+ */
+ if (ret == -EOPNOTSUPP)
+ return true;
+ else if (ret)
+ return false;
+
+ return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Release allocated DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
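A hedged sketch of how the three DP resource helpers above pair up around tunnel setup; the DP tunnel allocation itself is elided and the example_dp_resource() name is hypothetical.

static int example_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	/* Skip the DP IN adapter if the resource is not available */
	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	ret = usb4_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... allocate and activate the DP tunnel using @in here ... */

	/* On teardown release the resource again */
	usb4_switch_dealloc_dp_resource(sw, in);
	return 0;
}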
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+ struct tb_port *p;
+ int usb4_idx = 0;
+
+ /* Assume port is primary */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_null(p))
+ continue;
+ if (tb_is_upstream_port(p))
+ continue;
+ if (!p->link_nr) {
+ if (p == port)
+ break;
+ usb4_idx++;
+ }
+ }
+
+ return usb4_idx;
+}
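An illustration of the numbering above, assuming a typical layout where each physical USB4 port is a dual-lane adapter pair:

/*
 * Example: a router with upstream lane adapters 1-2 and two downstream
 * ports whose lane 0 adapters are ports 3 and 5. The walk skips the
 * upstream pair and the lane 1 adapters, so usb4_port_idx() returns 0
 * for port 3 and 1 for port 5. The map functions below then pick the
 * PCIe/USB3 downstream adapter with the matching index.
 */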
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int pcie_idx = 0;
+
+ /* Find PCIe down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_pcie_down(p))
+ continue;
+
+ if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
+ return p;
+
+ pcie_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int usb_idx = 0;
+
+ /* Find USB3 down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_usb3_down(p))
+ continue;
+
+ if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
+ return p;
+
+ usb_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_4_LCK;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 880d784398a3..053f918e00e8 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
const uuid_t *remote_uuid)
{
+ struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_xdomain *xd;
+ struct tb_port *down;
+
+ /* Make sure the downstream domain is accessible */
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
xd = kzalloc(sizeof(*xd), GFP_KERNEL);
if (!xd)
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 4562c8060d09..a6aabfd6e2da 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void)
return nboard;
/* probe for CD1400... */
- cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
+ cy_isa_address = ioremap(isa_address, CyISA_Ywin);
if (cy_isa_address == NULL) {
printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
"address\n");
@@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev,
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
card_name = "Cyclom-Y";
- addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Yctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
goto err_reg;
}
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
CyPCI_Ywin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
@@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
} else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
struct RUNTIME_9060 __iomem *ctl_addr;
- ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Zctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
@@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
mailbox = readl(&ctl_addr->mail_box_0);
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 4c1cd49ae95b..620d8488b83e 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
atomic_set(&priv->xmit_total, 0);
raw_spin_lock_init(&priv->lock);
- priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+ priv->reg = devm_ioremap(priv->dev, dev->res.start,
resource_size(&dev->res));
if (!priv->reg) {
dev_err(priv->dev, "ioremap failed for resource %pR\n",
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 3a1a5e0ee93f..9f13f7d49dd7 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev,
goto err;
}
- board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+ board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000);
if (board->basemem == NULL) {
dev_err(&pdev->dev, "can't remap io space 2\n");
retval = -ENOMEM;
@@ -1071,7 +1071,7 @@ static int __init moxa_init(void)
brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
numports[i];
brd->busType = MOXA_BUS_TYPE_ISA;
- brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+ brd->basemem = ioremap(baseaddr[i], 0x4000);
if (!brd->basemem) {
printk(KERN_ERR "MOXA: can't remap %lx\n",
baseaddr[i]);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 36a3eb4ad4c5..f1c90fa2978e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2704,7 +2704,7 @@ static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
}
/* called when a packet did not ack after watchdogtimeout */
-static void gsm_mux_net_tx_timeout(struct net_device *net)
+static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
/* Tell syslog we are hosed. */
dev_dbg(&net->dev, "Tx timed out.\n");
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 0809ae2aa9b1..673cda3d011d 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
uart.port.uartclk = (dev->id.sversion != 0xad) ?
7272727 : 1843200;
uart.port.mapbase = address;
- uart.port.membase = ioremap_nocache(address, 16);
+ uart.port.membase = ioremap(address, 16);
if (!uart.port.membase) {
dev_warn(&dev->dev, "Failed to map memory\n");
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 1ee7b89817dd..6f343ca08440 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1143,7 +1143,7 @@ static int omap8250_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+ membase = devm_ioremap(&pdev->dev, regs->start,
resource_size(regs));
if (!membase)
return -ENODEV;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 022924d5ad54..939685fed396 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
/*
* enable/disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
writel(irq_config, p + 0x4c);
@@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev)
/*
* disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p != NULL) {
writel(0, p + 0x4c);
@@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
break;
}
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c4330d40c638..430e3467aff7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2765,7 +2765,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, size);
+ port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
ret = -ENOMEM;
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 6192ed011bc3..4552742c3859 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -673,7 +673,7 @@ static void dz_release_port(struct uart_port *uport)
static int dz_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
dec_kn_slot_size);
if (!uport->membase) {
printk(KERN_ERR "dz: Cannot map MMIO\n");
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index fcbea43dc334..f67226df30d4 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(&pdev->dev,
+ port->membase = devm_ioremap(&pdev->dev,
port->mapbase, size);
if (port->membase == NULL)
return -ENOMEM;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 052c2e2fdd15..d2c08b760f83 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -410,7 +410,7 @@ static int meson_uart_request_port(struct uart_port *port)
return -EBUSY;
}
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
port->mapsize);
if (!port->membase)
return -ENOMEM;
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 2a9953211a2a..47ab280f553b 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -470,7 +470,7 @@ static int __init mux_probe(struct parisc_device *dev)
port->iobase = 0;
port->mapbase = dev->hpa.start + MUX_OFFSET +
(i * MUX_LINE_OFFSET);
- port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
+ port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET);
port->iotype = UPIO_MEM;
port->type = PORT_MUX;
port->irq = 0;
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index d2d8b3494685..42c8cc93b603 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 0bdf1687983f..484b7e8d5381 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port)
"pic32_uart_mem"))
return -EBUSY;
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res_mem));
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index ff9a27d48bca..b5ef86ae2746 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index cb8185bffe42..bd5e7e9938ce 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -664,7 +664,7 @@ static int sbd_map_port(struct uart_port *uport)
struct sbd_duart *duart = sport->duart;
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
DUART_CHANREG_SPACING);
if (!uport->membase) {
printk(err);
@@ -672,7 +672,7 @@ static int sbd_map_port(struct uart_port *uport)
}
if (!sport->memctrl)
- sport->memctrl = ioremap_nocache(duart->mapctrl,
+ sport->memctrl = ioremap(duart->mapctrl,
DUART_CHANREG_SPACING);
if (!sport->memctrl) {
printk(err);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9b4ff872e297..c073aa7001c4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2676,7 +2676,7 @@ static int sci_remap_port(struct uart_port *port)
return 0;
if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
- port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
+ port->membase = ioremap(port->mapbase, sport->reg_size);
if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
return -ENXIO;
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 1467952da3f6..4b4f604646a7 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -988,7 +988,7 @@ static void zs_release_port(struct uart_port *uport)
static int zs_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
ZS_CHAN_IO_SIZE);
if (!uport->membase) {
printk(KERN_ERR "zs: Cannot map MMIO\n");
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 84f26e43b229..08d2976593d5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
}
info->lcr_mem_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
0x40000);
if (!info->memory_base) {
printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
@@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+ info->lcr_base = ioremap(info->phys_lcr_base,
PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
@@ -7837,7 +7837,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 4d6a2c2aeb6c..b794177ccfb9 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1682,7 +1682,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slgt_info *info = dev_to_port(dev);
unsigned long flags;
@@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info)
else
info->reg_addr_requested = true;
- info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
+ info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
if (!info->reg_addr) {
DBGERR(("%s can't map device registers, addr=%08X\n",
info->device_name, info->phys_reg_addr));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 54b897a646d0..33ff2dbb6650 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1807,7 +1807,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
SLMP_INFO *info = dev_to_port(dev);
unsigned long flags;
@@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info)
else
info->sca_statctrl_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
SCA_MEM_SIZE);
if (!info->memory_base) {
printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
@@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
+ info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->lcr_base += info->lcr_offset;
- info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
+ info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE);
if (!info->sca_base) {
printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->sca_base += info->sca_offset;
- info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+ info->statctrl_base = ioremap(info->phys_statctrl_base,
PAGE_SIZE);
if (!info->statctrl_base) {
printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 2a1e89d12ed9..84716d216ae5 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -53,4 +53,14 @@ config USB_CDNS3_TI
e.g. J721e.
+config USB_CDNS3_IMX
+ tristate "Cadence USB3 support on NXP i.MX platforms"
+ depends on ARCH_MXC || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for NXP i.MX
+ platforms that contain the Cadence USB3 controller core.
+
+ For example, imx8qm and imx8qxp.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index 948e6b88d1a9..d47e341a6f39 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -15,3 +15,4 @@ cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
new file mode 100644
index 000000000000..aba988e71958
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
+ *
+ * Copyright (C) 2019 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+
+#define USB3_CORE_CTRL1 0x00
+#define USB3_CORE_CTRL2 0x04
+#define USB3_INT_REG 0x08
+#define USB3_CORE_STATUS 0x0c
+#define XHCI_DEBUG_LINK_ST 0x10
+#define XHCI_DEBUG_BUS 0x14
+#define USB3_SSPHY_CTRL1 0x40
+#define USB3_SSPHY_CTRL2 0x44
+#define USB3_SSPHY_STATUS 0x4c
+#define USB2_PHY_CTRL1 0x50
+#define USB2_PHY_CTRL2 0x54
+#define USB2_PHY_STATUS 0x5c
+
+/* Register bits definition */
+
+/* USB3_CORE_CTRL1 */
+#define SW_RESET_MASK (0x3f << 26)
+#define PWR_SW_RESET BIT(31)
+#define APB_SW_RESET BIT(30)
+#define AXI_SW_RESET BIT(29)
+#define RW_SW_RESET BIT(28)
+#define PHY_SW_RESET BIT(27)
+#define PHYAHB_SW_RESET BIT(26)
+#define ALL_SW_RESET (PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \
+ RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET)
+#define OC_DISABLE BIT(9)
+#define MDCTRL_CLK_SEL BIT(7)
+#define MODE_STRAP_MASK (0x7)
+#define DEV_MODE (1 << 2)
+#define HOST_MODE (1 << 1)
+#define OTG_MODE (1 << 0)
+
+/* USB3_INT_REG */
+#define CLK_125_REQ BIT(29)
+#define LPM_CLK_REQ BIT(28)
+#define DEVU3_WAEKUP_EN BIT(14)
+#define OTG_WAKEUP_EN BIT(12)
+#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
+#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
+
+/* USB3_CORE_STATUS */
+#define MDCTRL_CLK_STATUS BIT(15)
+#define DEV_POWER_ON_READY BIT(13)
+#define HOST_POWER_ON_READY BIT(12)
+
+/* USB3_SSPHY_STATUS */
+#define CLK_VALID_MASK (0x3f << 26)
+#define CLK_VALID_COMPARE_BITS (0xf << 28)
+#define PHY_REFCLK_REQ (1 << 0)
+
+struct cdns_imx {
+ struct device *dev;
+ void __iomem *noncore;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
+{
+ return readl(data->noncore + offset);
+}
+
+static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
+{
+ writel(value, data->noncore + offset);
+}
+
+static const struct clk_bulk_data imx_cdns3_core_clks[] = {
+ { .id = "usb3_lpm_clk" },
+ { .id = "usb3_bus_clk" },
+ { .id = "usb3_aclk" },
+ { .id = "usb3_ipg_clk" },
+ { .id = "usb3_core_pclk" },
+};
+
+static int cdns_imx_noncore_init(struct cdns_imx *data)
+{
+ u32 value;
+ int ret;
+ struct device *dev = data->dev;
+
+ cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK);
+ udelay(1);
+ ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+ (value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS,
+ 10, 100000);
+ if (ret) {
+ dev_err(dev, "wait clkvld timeout\n");
+ return ret;
+ }
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value |= ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ udelay(1);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ value |= HOST_INT1_EN | DEV_INT_EN;
+ cdns_imx_writel(data, USB3_INT_REG, value);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value &= ~ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ return ret;
+}
+
+static int cdns_imx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct cdns_imx *data;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+ data->dev = dev;
+ data->noncore = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->noncore)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->noncore);
+ }
+
+ data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+ data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = cdns_imx_noncore_init(data);
+ if (ret)
+ goto err;
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create children: %d\n", ret);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ return ret;
+}
+
+static int cdns_imx_remove_core(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_imx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_imx_of_match[] = {
+ { .compatible = "fsl,imx8qm-usb3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
+
+static struct platform_driver cdns_imx_driver = {
+ .probe = cdns_imx_probe,
+ .remove = cdns_imx_remove,
+ .driver = {
+ .name = "cdns3-imx",
+ .of_match_table = cdns_imx_of_match,
+ },
+};
+module_platform_driver(cdns_imx_driver);
+
+MODULE_ALIAS("platform:cdns3-imx");
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer");
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h
index 2c9afbfe988b..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/debug.h
@@ -140,7 +140,7 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
trb_per_sector = TRBS_PER_SEGMENT;
if (trb_per_sector > TRBS_PER_SEGMENT) {
- sprintf(str + ret, "\t\tTo big transfer ring %d\n",
+ sprintf(str + ret, "\t\tTransfer ring %d too big\n",
trb_per_sector);
return str;
}
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 02f6ca2cb1ba..736b0c6e27fe 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -71,6 +71,23 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags);
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+/**
+ * cdns3_clear_register_bit - clear bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to clear
+ */
+void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) & ~mask;
+ writel(mask, ptr);
+}
+
/**
* cdns3_set_register_bit - set bit in given register.
* @ptr: address of device controller register to be read and changed
@@ -150,6 +167,21 @@ void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
writel(ep, &priv_dev->regs->ep_sel);
}
+/**
+ * cdns3_get_tdl - gets current tdl for selected endpoint.
+ * @priv_dev: extended gadget object
+ *
+ * Before calling this function the appropriate endpoint must
+ * be selected by means of the cdns3_select_ep() function.
+ */
+static int cdns3_get_tdl(struct cdns3_device *priv_dev)
+{
+ if (priv_dev->dev_ver < DEV_VER_V3)
+ return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+ else
+ return readl(&priv_dev->regs->ep_tdl);
+}
+
dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
struct cdns3_trb *trb)
{
@@ -166,7 +198,22 @@ int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
case USB_ENDPOINT_XFER_CONTROL:
return TRB_CTRL_RING_SIZE;
default:
- return TRB_RING_SIZE;
+ if (priv_ep->use_streams)
+ return TRB_STREAM_RING_SIZE;
+ else
+ return TRB_RING_SIZE;
+ }
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_ep->trb_pool) {
+ dma_free_coherent(priv_dev->sysdev,
+ cdns3_ring_size(priv_ep),
+ priv_ep->trb_pool, priv_ep->trb_pool_dma);
+ priv_ep->trb_pool = NULL;
}
}
@@ -180,8 +227,12 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
int ring_size = cdns3_ring_size(priv_ep);
+ int num_trbs = ring_size / TRB_SIZE;
struct cdns3_trb *link_trb;
+ if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
+ cdns3_free_trb_pool(priv_ep);
+
if (!priv_ep->trb_pool) {
priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
ring_size,
@@ -189,32 +240,30 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
GFP_DMA32 | GFP_ATOMIC);
if (!priv_ep->trb_pool)
return -ENOMEM;
- } else {
+
+ priv_ep->alloc_ring_size = ring_size;
memset(priv_ep->trb_pool, 0, ring_size);
}
+ priv_ep->num_trbs = num_trbs;
+
if (!priv_ep->num)
return 0;
- priv_ep->num_trbs = ring_size / TRB_SIZE;
- /* Initialize the last TRB as Link TRB. */
+ /* Initialize the last TRB as Link TRB */
link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
- link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
-
- return 0;
-}
-static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
-{
- struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
-
- if (priv_ep->trb_pool) {
- dma_free_coherent(priv_dev->sysdev,
- cdns3_ring_size(priv_ep),
- priv_ep->trb_pool, priv_ep->trb_pool_dma);
- priv_ep->trb_pool = NULL;
+ if (priv_ep->use_streams) {
+ /*
+ * For stream capable endpoints driver use single correct TRB.
+ * The last trb has zeroed cycle bit
+ */
+ link_trb->control = 0;
+ } else {
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+ link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
}
+ return 0;
}
/**
@@ -253,6 +302,7 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
priv_dev->onchip_used_size = 0;
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
+ priv_dev->using_streams = 0;
}
/**
@@ -356,17 +406,43 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
{
struct usb_request *request;
int ret = 0;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /*
+ * If the last pending transfer is INTERNAL
+ * OR streams are enabled for this endpoint
+ * do NOT start a new transfer while the last one is still pending
+ */
+ if (!pending_empty) {
+ struct cdns3_request *priv_req;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ priv_req = to_cdns3_request(request);
+ if ((priv_req->flags & REQUEST_INTERNAL) ||
+ (priv_ep->flags & EP_TDLCHK_EN) ||
+ priv_ep->use_streams) {
+ trace_printk("Blocking external request\n");
+ return ret;
+ }
+ }
while (!list_empty(&priv_ep->deferred_req_list)) {
request = cdns3_next_request(&priv_ep->deferred_req_list);
- ret = cdns3_ep_run_transfer(priv_ep, request);
+ if (!priv_ep->use_streams) {
+ ret = cdns3_ep_run_transfer(priv_ep, request);
+ } else {
+ priv_ep->stream_sg_idx = 0;
+ ret = cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
if (ret)
return ret;
list_del(&request->list);
list_add_tail(&request->list,
&priv_ep->pending_req_list);
+ if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
+ break;
}
priv_ep->flags &= ~EP_RING_FULL;
@@ -379,7 +455,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
* buffer for unblocking on-chip FIFO buffer. This flag will be cleared
* if before first DESCMISS interrupt the DMA will be armed.
*/
-#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
+#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
(reg) |= EP_STS_EN_DESCMISEN; \
@@ -450,10 +526,17 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
if (!req)
return NULL;
+ /* unmap the gadget request before copying data */
+ usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
+ priv_ep->dir);
+
cdns3_wa2_descmiss_copy_data(priv_ep, req);
if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
req->length != req->actual) {
/* wait for next part of transfer */
+ /* re-map the gadget request buffer*/
+ usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
+ usb_endpoint_dir_in(priv_ep->endpoint.desc));
return NULL;
}
@@ -570,6 +653,13 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
struct usb_request *request;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /* check for pending transfer */
+ if (!pending_empty) {
+ trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
+ return;
+ }
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
@@ -578,8 +668,10 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
- if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
+ if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
+ trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
cdns3_wa2_remove_old_request(priv_ep);
+ }
request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
GFP_ATOMIC);
@@ -621,6 +713,78 @@ err:
"Failed: No sufficient memory for DESCMIS\n");
}
+static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
+{
+ u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl) {
+ u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
+
+ writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+}
+
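The reset value above is not arbitrary: the TDL field is 7 bits wide, so the counter wraps modulo EP_CMD_TDL_MAX + 1 = 128, and writing the complement 128 - tdl drives it back to zero. A quick standalone check of that arithmetic (assuming a pure modular wrap, as the field mask implies):

#include <assert.h>

#define EP_CMD_TDL_MAX	127	/* GENMASK(15, 9) >> 9, a 7-bit field */

int main(void)
{
	unsigned int tdl;

	for (tdl = 1; tdl <= EP_CMD_TDL_MAX; tdl++) {
		unsigned int reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		/* the counter wraps modulo 128, landing on zero */
		assert(((tdl + reset_val) & EP_CMD_TDL_MAX) == 0);
	}
	return 0;
}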
+static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
+{
+ u32 ep_sts_reg;
+
+ /* select EP0-out */
+ cdns3_select_ep(priv_dev, 0);
+
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+ if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
+ u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
+ struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
+
+ if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
+ outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
+ u8 pending_empty = list_empty(&outq_ep->pending_req_list);
+
+ if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
+ (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
+ !pending_empty) {
+ } else {
+ u32 ep_sts_en_reg;
+ u32 ep_cmd_reg;
+
+ cdns3_select_ep(priv_dev, outq_ep->num |
+ outq_ep->dir);
+ ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
+ ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
+
+ outq_ep->flags |= EP_TDLCHK_EN;
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+ EP_CFG_TDL_CHK);
+
+ cdns3_wa2_enable_detection(priv_dev, outq_ep,
+ ep_sts_en_reg);
+ writel(ep_sts_en_reg,
+ &priv_dev->regs->ep_sts_en);
+ /* reset tdl value to zero */
+ cdns3_wa2_reset_tdl(priv_dev);
+ /*
+ * Memory barrier - Reset tdl before ringing the
+ * doorbell.
+ */
+ wmb();
+ if (EP_CMD_DRDY & ep_cmd_reg) {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
+
+ } else {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
+ /*
+ * ring doorbell to generate DESCMIS irq
+ */
+ writel(EP_CMD_DRDY,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+ }
+ }
+}
+
/**
* cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
@@ -807,14 +971,120 @@ static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
cdns3_wa1_restore_cycle_bit(priv_ep);
}
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb;
+ dma_addr_t trb_dma;
+ int address;
+ u32 control;
+ u32 length;
+ u32 tdl;
+ unsigned int sg_idx = priv_ep->stream_sg_idx;
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+
+ priv_ep->flags |= EP_PENDING_REQUEST;
+
+ /* must allocate buffer aligned to 8 */
+ if (priv_req->flags & REQUEST_UNALIGNED)
+ trb_dma = priv_req->aligned_buf->dma;
+ else
+ trb_dma = request->dma;
+
+	/* For stream capable endpoints the driver uses only a single TD. */
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ priv_req->start_trb = priv_ep->enqueue;
+ priv_req->end_trb = priv_req->start_trb;
+ priv_req->trb = trb;
+
+ cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+ control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
+ TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
+
+ if (!request->num_sgs) {
+ trb->buffer = TRB_BUFFER(trb_dma);
+ length = request->length;
+ } else {
+ trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
+ length = request->sg[sg_idx].length;
+ }
+
+ tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
+
+ trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
+ TRB_LEN(length);
+
+ /*
+	 * For controller version DEV_VER_V2 we have enabled
+	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
+	 * This enables TRB-based TDL calculation, hence the TDL is set in the TRB.
+ */
+ if (priv_dev->dev_ver >= DEV_VER_V2) {
+ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+ trb->length |= TRB_TDL_SS_SIZE(tdl);
+ }
+ priv_req->flags |= REQUEST_PENDING;
+
+ trb->control = control;
+
+ trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+ /*
+ * Memory barrier - Cycle Bit must be set before trb->length and
+ * trb->buffer fields.
+ */
+ wmb();
+
+ /* always first element */
+ writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
+ &priv_dev->regs->ep_traddr);
+
+ if (!(priv_ep->flags & EP_STALLED)) {
+ trace_cdns3_ring(priv_ep);
+		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+ writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+
+ priv_ep->prime_flag = false;
+
+ /*
+		 * For controller version DEV_VER_V2 the TDL
+		 * calculation is based on the TRB.
+ */
+
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ else if (priv_dev->dev_ver > DEV_VER_V2)
+ writel(tdl, &priv_dev->regs->ep_tdl);
+
+ priv_ep->last_stream_id = priv_req->request.stream_id;
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
+ EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+
+ /* WORKAROUND for transition to L0 */
+ __cdns3_gadget_wakeup(priv_dev);
+
+ return 0;
+}
+
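For reference, the TDL programmed above is simply the transfer length expressed in max-packet-sized chunks. A worked example with hypothetical numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int maxpacket = 1024;	/* SuperSpeed bulk */
	unsigned int length = 3000;	/* hypothetical request length */

	/* 3000 bytes need three 1024-byte packets, so tdl = 3 */
	printf("tdl = %u\n", DIV_ROUND_UP(length, maxpacket));
	return 0;
}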
/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
* @priv_ep: endpoint object
*
* Returns zero on success or negative value on failure
*/
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request)
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
@@ -826,6 +1096,7 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
int address;
u32 control;
int pcs;
+ u16 total_tdl = 0;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval;
@@ -910,6 +1181,9 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (likely(priv_dev->dev_ver >= DEV_VER_V2))
td_size = DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
+ else if (priv_ep->flags & EP_TDLCHK_EN)
+ total_tdl += DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
TRB_LEN(length);
@@ -954,6 +1228,23 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (sg_iter == 1)
trb->control |= TRB_IOC | TRB_ISP;
+ if (priv_dev->dev_ver < DEV_VER_V2 &&
+ (priv_ep->flags & EP_TDLCHK_EN)) {
+ u16 tdl = total_tdl;
+ u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
+ }
+
+ if (old_tdl < tdl) {
+ tdl -= old_tdl;
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+
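The clamp above pairs with cdns3_reprogram_tdl() further down: anything beyond the 7-bit field capacity is parked in pending_tdl and fed to the hardware in EP_CMD_TDL_MAX-sized chunks on later IOT interrupts. A standalone sketch of the split, with hypothetical numbers:

#include <stdio.h>

#define EP_CMD_TDL_MAX	127

int main(void)
{
	unsigned int total_tdl = 300;	/* hypothetical */
	unsigned int pending_tdl = 0;
	unsigned int tdl = total_tdl;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		pending_tdl = total_tdl - EP_CMD_TDL_MAX;
	}
	printf("programmed %u, pending %u\n", tdl, pending_tdl);	/* 127, 173 */

	/* later IOT interrupts drain the remainder: 127, then 46 */
	while (pending_tdl) {
		tdl = pending_tdl > EP_CMD_TDL_MAX ? EP_CMD_TDL_MAX : pending_tdl;
		pending_tdl -= tdl;
		printf("reprogrammed %u, pending %u\n", tdl, pending_tdl);
	}
	return 0;
}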
/*
	 * Memory barrier - cycle bit must be set before other fields in the TRB.
*/
@@ -1153,29 +1444,56 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
cdns3_move_deq_to_next_trb(priv_req);
}
- /* Re-select endpoint. It could be changed by other CPU during
- * handling usb_gadget_giveback_request.
- */
- cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+ if (!request->stream_id) {
+ /* Re-select endpoint. It could be changed by other CPU
+ * during handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
- if (!cdns3_request_handled(priv_ep, priv_req))
- goto prepare_next_td;
+ if (!cdns3_request_handled(priv_ep, priv_req))
+ goto prepare_next_td;
- trb = priv_ep->trb_pool + priv_ep->dequeue;
- trace_cdns3_complete_trb(priv_ep, trb);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+ trace_cdns3_complete_trb(priv_ep, trb);
+
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
+
+ request->actual = TRB_LEN(le32_to_cpu(trb->length));
+ cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
+ TRBS_PER_SEGMENT == 2)
+ break;
+ } else {
+ /* Re-select endpoint. It could be changed by other CPU
+ * during handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+
+ trb = priv_ep->trb_pool;
+ trace_cdns3_complete_trb(priv_ep, trb);
- if (trb != priv_req->trb)
- dev_warn(priv_dev->dev,
- "request_trb=0x%p, queue_trb=0x%p\n",
- priv_req->trb, trb);
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
- request->actual = TRB_LEN(le32_to_cpu(trb->length));
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request->actual += TRB_LEN(le32_to_cpu(trb->length));
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
- TRBS_PER_SEGMENT == 2)
+ if (!request->num_sgs ||
+ (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
+ priv_ep->stream_sg_idx = 0;
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ } else {
+ priv_ep->stream_sg_idx++;
+ cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
break;
+ }
}
priv_ep->flags &= ~EP_PENDING_REQUEST;
@@ -1205,6 +1523,21 @@ void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
}
}
+static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
+{
+ u16 tdl = priv_ep->pending_tdl;
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
+ } else {
+ priv_ep->pending_tdl = 0;
+ }
+
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
+}
+
/**
* cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
* @priv_ep: endpoint object
@@ -1215,6 +1548,9 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 ep_sts_reg;
+ struct usb_request *deferred_request;
+ struct usb_request *pending_request;
+ u32 tdl = 0;
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
@@ -1223,6 +1559,36 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+ if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
+ bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
+
+ tdl = cdns3_get_tdl(priv_dev);
+
+ /*
+ * Continue the previous transfer:
+		 * There is a race between ERDY and PRIME. The device sends
+		 * ERDY and, almost at the same time, the host sends PRIME.
+		 * This causes the host to ignore the ERDY packet, so the
+		 * driver has to send it again.
+ */
+		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
+		    EP_STS_HOSTPP(ep_sts_reg))) {
+ writel(EP_CMD_ERDY |
+ EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+ &priv_dev->regs->ep_cmd);
+ ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
+ } else {
+ priv_ep->prime_flag = true;
+
+ pending_request = cdns3_next_request(&priv_ep->pending_req_list);
+ deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+			if (deferred_request && !pending_request)
+				cdns3_start_all_request(priv_dev, priv_ep);
+ }
+ }
+
if (ep_sts_reg & EP_STS_TRBERR) {
if (priv_ep->flags & EP_STALL_PENDING &&
!(ep_sts_reg & EP_STS_DESCMIS &&
@@ -1259,7 +1625,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
}
}
- if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+ if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
+ (ep_sts_reg & EP_STS_IOT)) {
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
if (ep_sts_reg & EP_STS_ISP)
priv_ep->flags |= EP_QUIRK_END_TRANSFER;
@@ -1267,6 +1634,29 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
}
+ if (!priv_ep->use_streams) {
+ if ((ep_sts_reg & EP_STS_IOC) ||
+ (ep_sts_reg & EP_STS_ISP)) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
+ priv_ep->pending_tdl) {
+ /* handle IOT with pending tdl */
+ cdns3_reprogram_tdl(priv_ep);
+ }
+ } else if (priv_ep->dir == USB_DIR_OUT) {
+ priv_ep->ep_sts_pending |= ep_sts_reg;
+ } else if (ep_sts_reg & EP_STS_IOT) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ }
+ }
+
+ /*
+	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
+	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state machine.
+ */
+ if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
+ (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
+ priv_ep->ep_sts_pending = 0;
cdns3_transfer_completed(priv_dev, priv_ep);
}
@@ -1274,7 +1664,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
	 * WA2: this condition should only be met when
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
- * In other cases this interrupt will be disabled/
+ * In other cases this interrupt will be disabled.
*/
if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
!(priv_ep->flags & EP_STALLED))
@@ -1457,6 +1847,9 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
ret = IRQ_HANDLED;
}
+ if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
+ cdns3_wa2_check_outq_status(priv_dev);
+
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -1511,6 +1904,27 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
+void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
+ return;
+
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+ /*
+ * Stream capable endpoints are handled by using ep_tdl
+ * register. Other endpoints use TDL from TRB feature.
+ */
+ cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
+ }
+
+ /* Enable Stream Bit TDL chk and SID chk */
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
+ EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
+}
+
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
@@ -1772,6 +2186,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
{
struct cdns3_endpoint *priv_ep;
struct cdns3_device *priv_dev;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
u32 reg = EP_STS_EN_TRBERREN;
u32 bEndpointAddress;
unsigned long flags;
@@ -1781,6 +2196,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
+ comp_desc = priv_ep->endpoint.comp_desc;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
@@ -1811,6 +2227,24 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
goto exit;
}
+ bEndpointAddress = priv_ep->num | priv_ep->dir;
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ /*
+ * Enable stream support (SS mode) related interrupts
+ * in EP_STS_EN Register
+ */
+ if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+ reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
+ EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
+ EP_STS_EN_STREAMREN;
+ priv_ep->use_streams = true;
+ cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+ priv_dev->using_streams |= true;
+ }
+ }
+
ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
@@ -1957,6 +2391,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
ep->desc = NULL;
priv_ep->flags &= ~EP_ENABLED;
+ priv_ep->use_streams = false;
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -2005,13 +2440,21 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
list_add_tail(&request->list, &priv_ep->deferred_req_list);
/*
+	 * For a stream capable endpoint, start the request only if the
+	 * prime irq flag is set.
* If hardware endpoint configuration has not been set yet then
* just queue request in deferred list. Transfer will be started in
* cdns3_set_hw_configuration.
*/
- if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
- !(priv_ep->flags & EP_STALL_PENDING))
- cdns3_start_all_request(priv_dev, priv_ep);
+ if (!request->stream_id) {
+ if (priv_dev->hw_configured_flag &&
+ !(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING))
+ cdns3_start_all_request(priv_dev, priv_ep);
+ } else {
+ if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
return 0;
}
@@ -2384,7 +2827,6 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
struct cdns3_endpoint *priv_ep;
u32 bEndpointAddress;
struct usb_ep *ep;
- int ret = 0;
int val;
priv_dev->gadget_driver = NULL;
@@ -2408,7 +2850,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
writel(0, &priv_dev->regs->usb_ien);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
- return ret;
+ return 0;
}
static const struct usb_gadget_ops cdns3_gadget_ops = {
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index bc4024041ef2..f003a7801872 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -599,6 +599,7 @@ struct cdns3_usb_regs {
#define EP_CMD_TDL_MASK GENMASK(15, 9)
#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK)
#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9)
+#define EP_CMD_TDL_MAX (EP_CMD_TDL_MASK >> 9)
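The new macro evaluates to 127, the largest value the 7-bit field can hold, and the SET/GET pair round-trips any in-range value. A standalone check (GENMASK expanded by hand for user space):

#include <assert.h>

#define GENMASK(h, l)		(((~0U) << (l)) & (~0U >> (31 - (h))))
#define EP_CMD_TDL_MASK		GENMASK(15, 9)
#define EP_CMD_TDL_SET(p)	(((p) << 9) & EP_CMD_TDL_MASK)
#define EP_CMD_TDL_GET(p)	(((p) & EP_CMD_TDL_MASK) >> 9)
#define EP_CMD_TDL_MAX		(EP_CMD_TDL_MASK >> 9)

int main(void)
{
	assert(EP_CMD_TDL_MASK == 0xfe00);
	assert(EP_CMD_TDL_MAX == 127);
	assert(EP_CMD_TDL_GET(EP_CMD_TDL_SET(42)) == 42);
	/* values above the field width silently wrap */
	assert(EP_CMD_TDL_SET(130) == EP_CMD_TDL_SET(2));
	return 0;
}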
/* ERDY Stream ID value (used in SS mode). */
#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16)
@@ -969,10 +970,18 @@ struct cdns3_usb_regs {
#define ISO_MAX_INTERVAL 10
+#define MAX_TRB_LENGTH BIT(16)
+
#if TRBS_PER_SEGMENT < 2
#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
#endif
+#define TRBS_PER_STREAM_SEGMENT 2
+
+#if TRBS_PER_STREAM_SEGMENT < 2
+#error "Incorrect TRBS_PER_STREAM_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
/*
 * Only for ISOC endpoints - maximum number of TRBs is calculated as
* pow(2, bInterval-1) * number of usb requests. It is limitation made by
@@ -1000,6 +1009,7 @@ struct cdns3_trb {
#define TRB_SIZE (sizeof(struct cdns3_trb))
#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_STREAM_RING_SIZE (TRB_SIZE * TRBS_PER_STREAM_SEGMENT)
#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2)
@@ -1078,7 +1088,7 @@ struct cdns3_trb {
#define CDNS3_ENDPOINTS_MAX_COUNT 32
#define CDNS3_EP_ZLP_BUF_SIZE 1024
-#define CDNS3_EP_BUF_SIZE 2 /* KB */
+#define CDNS3_EP_BUF_SIZE 4 /* KB */
#define CDNS3_EP_ISO_HS_MULT 3
#define CDNS3_EP_ISO_SS_BURST 3
#define CDNS3_MAX_NUM_DESCMISS_BUF 32
@@ -1109,6 +1119,7 @@ struct cdns3_device;
* @interval: interval between packets used for ISOC endpoint.
* @free_trbs: number of free TRBs in transfer ring
* @num_trbs: number of all TRBs in transfer ring
+ * @alloc_ring_size: size of the allocated TRB ring
* @pcs: producer cycle state
* @ccs: consumer cycle state
* @enqueue: enqueue index in transfer ring
@@ -1142,6 +1153,7 @@ struct cdns3_endpoint {
#define EP_QUIRK_END_TRANSFER BIT(11)
#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+#define EP_TDLCHK_EN BIT(15)
u32 flags;
struct cdns3_request *descmis_req;
@@ -1153,6 +1165,7 @@ struct cdns3_endpoint {
int free_trbs;
int num_trbs;
+ int alloc_ring_size;
u8 pcs;
u8 ccs;
int enqueue;
@@ -1163,6 +1176,14 @@ struct cdns3_endpoint {
struct cdns3_trb *wa1_trb;
unsigned int wa1_trb_index;
unsigned int wa1_cycle_bit:1;
+
+ /* Stream related */
+ unsigned int use_streams:1;
+ unsigned int prime_flag:1;
+ u32 ep_sts_pending;
+ u16 last_stream_id;
+ u16 pending_tdl;
+ unsigned int stream_sg_idx;
};
/**
@@ -1290,6 +1311,7 @@ struct cdns3_device {
int hw_configured_flag:1;
int wake_up_flag:1;
unsigned status_completion_no_call:1;
+ unsigned using_streams:1;
int out_mem_is_allocated;
struct work_struct pending_status_wq;
@@ -1310,8 +1332,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev);
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep);
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable);
struct usb_request *cdns3_next_request(struct list_head *list);
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request);
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm);
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep);
u8 cdns3_ep_addr_to_index(u8 ep_addr);
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index e92348c9b4d7..8d121e207fd8 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -122,18 +122,24 @@ DECLARE_EVENT_CLASS(cdns3_log_epx_irq,
__string(ep_name, priv_ep->name)
__field(u32, ep_sts)
__field(u32, ep_traddr)
+ __field(u32, ep_last_sid)
+ __field(u32, use_streams)
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
__assign_str(ep_name, priv_ep->name);
__entry->ep_sts = readl(&priv_dev->regs->ep_sts);
__entry->ep_traddr = readl(&priv_dev->regs->ep_traddr);
+ __entry->ep_last_sid = priv_ep->last_stream_id;
+ __entry->use_streams = priv_ep->use_streams;
),
- TP_printk("%s, ep_traddr: %08x",
+ TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d",
cdns3_decode_epx_irq(__get_str(str),
__get_str(ep_name),
__entry->ep_sts),
- __entry->ep_traddr)
+ __entry->ep_traddr,
+ __entry->ep_last_sid,
+ __entry->use_streams)
);
DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq,
@@ -210,6 +216,7 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__field(int, end_trb)
__field(struct cdns3_trb *, start_trb_addr)
__field(int, flags)
+ __field(unsigned int, stream_id)
),
TP_fast_assign(
__assign_str(name, req->priv_ep->name);
@@ -225,9 +232,10 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->end_trb = req->end_trb;
__entry->start_trb_addr = req->trb;
__entry->flags = req->flags;
+ __entry->stream_id = req->request.stream_id;
),
TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d,"
- " trb: [start:%d, end:%d: virt addr %pa], flags:%x ",
+ " trb: [start:%d, end:%d: virt addr %pa], flags:%x SID: %u",
__get_str(name), __entry->req, __entry->buf, __entry->actual,
__entry->length,
__entry->zero ? "Z" : "z",
@@ -237,7 +245,8 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->start_trb,
__entry->end_trb,
__entry->start_trb_addr,
- __entry->flags
+ __entry->flags,
+ __entry->stream_id
)
);
@@ -281,6 +290,39 @@ TRACE_EVENT(cdns3_ep0_queue,
__entry->length)
);
+DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->priv_ep->name)
+ __field(struct cdns3_request *, req)
+ __field(unsigned int, length)
+ __field(unsigned int, actual)
+ __field(unsigned int, stream_id)
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->priv_ep->name);
+ __entry->req = req;
+		__entry->length = req->request.length;
+		__entry->actual = req->request.actual;
+ __entry->stream_id = req->request.stream_id;
+ ),
+	TP_printk("%s: req: %p, request length: %u actual length: %u SID: %u",
+ __get_str(name), __entry->req, __entry->length,
+ __entry->actual, __entry->stream_id)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len,
+ cdns3_stream_transfer_split_next_part,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_aligned_request,
TP_PROTO(struct cdns3_request *priv_req),
TP_ARGS(priv_req),
@@ -319,6 +361,34 @@ DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request,
TP_ARGS(req)
);
+DECLARE_EVENT_CLASS(cdns3_log_map_request,
+ TP_PROTO(struct cdns3_request *priv_req),
+ TP_ARGS(priv_req),
+ TP_STRUCT__entry(
+ __string(name, priv_req->priv_ep->name)
+ __field(struct usb_request *, req)
+ __field(void *, buf)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_req->priv_ep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ ),
+	TP_printk("%s: req: %p, req buf %p, dma %pad",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma
+ )
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_trb,
TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
TP_ARGS(priv_ep, trb),
@@ -329,6 +399,7 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__field(u32, length)
__field(u32, control)
__field(u32, type)
+ __field(unsigned int, last_stream_id)
),
TP_fast_assign(
__assign_str(name, priv_ep->name);
@@ -337,8 +408,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->length = trb->length;
__entry->control = trb->control;
__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+ __entry->last_stream_id = priv_ep->last_stream_id;
),
- TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)",
+ TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
__get_str(name), __entry->trb, __entry->buffer,
TRB_LEN(__entry->length),
(u8)TRB_BURST_LEN_GET(__entry->length),
@@ -349,7 +421,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
__entry->control & TRB_CHAIN ? "CHAIN, " : "",
__entry->control & TRB_IOC ? "IOC, " : "",
- TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK"
+ TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK",
+ TRB_FIELD_TO_STREAMID(__entry->control),
+ __entry->last_stream_id
)
);
@@ -398,6 +472,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__field(unsigned int, maxpacket)
__field(unsigned int, maxpacket_limit)
__field(unsigned int, max_streams)
+ __field(unsigned int, use_streams)
__field(unsigned int, maxburst)
__field(unsigned int, flags)
__field(unsigned int, dir)
@@ -409,16 +484,18 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__entry->maxpacket = priv_ep->endpoint.maxpacket;
__entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit;
__entry->max_streams = priv_ep->endpoint.max_streams;
+ __entry->use_streams = priv_ep->use_streams;
__entry->maxburst = priv_ep->endpoint.maxburst;
__entry->flags = priv_ep->flags;
__entry->dir = priv_ep->dir;
__entry->enqueue = priv_ep->enqueue;
__entry->dequeue = priv_ep->dequeue;
),
- TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, "
- "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
+ TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, "
+ "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
__get_str(name), __entry->maxpacket,
__entry->maxpacket_limit, __entry->max_streams,
+ __entry->use_streams,
__entry->maxburst, __entry->enqueue,
__entry->dequeue,
__entry->flags & EP_ENABLED ? "EN | " : "",
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index ae850b3fddf2..d53db520e209 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -7,6 +7,7 @@ config USB_CHIPIDEA
select RESET_CONTROLLER
select USB_ULPI_BUS
select USB_ROLE_SWITCH
+ select USB_TEGRA_PHY if ARCH_TEGRA
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6911aef500e9..d49d5e1235d0 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -302,6 +302,16 @@ static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
return USB_ROLE_NONE;
}
+static inline enum ci_role usb_role_to_ci_role(enum usb_role role)
+{
+ if (role == USB_ROLE_HOST)
+ return CI_ROLE_HOST;
+ else if (role == USB_ROLE_DEVICE)
+ return CI_ROLE_GADGET;
+ else
+ return CI_ROLE_END;
+}
+
/**
* hw_read_id_reg: reads from a identification register
* @ci: the controller
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 0c9911d44ee5..7455df0ede49 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -83,13 +83,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return err;
}
- /*
- * Tegra's USB PHY driver doesn't implement optional phy_init()
- * hook, so we have to power on UDC controller before ChipIdea
- * driver initialization kicks in.
- */
- usb_phy_set_suspend(udc->phy, 0);
-
/* setup and register ChipIdea HDRC device */
udc->data.name = "tegra-udc";
udc->data.flags = soc->flags;
@@ -109,7 +102,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return 0;
fail_power_off:
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return err;
}
@@ -119,7 +111,6 @@ static int tegra_udc_remove(struct platform_device *pdev)
struct tegra_udc *udc = platform_get_drvdata(pdev);
ci_hdrc_remove_device(udc->dev);
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index dce5db41501c..52139c2a9924 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -618,9 +618,11 @@ static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
struct ci_hdrc *ci = dev_get_drvdata(dev);
struct ci_hdrc_cable *cable = NULL;
enum usb_role current_role = ci_role_to_usb_role(ci);
+ enum ci_role ci_role = usb_role_to_ci_role(role);
unsigned long flags;
- if (current_role == role)
+ if ((ci_role != CI_ROLE_END && !ci->roles[ci_role]) ||
+ (current_role == role))
return 0;
pm_runtime_get_sync(ci->dev);
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 70112cf0f195..2625aa01a911 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -20,7 +20,7 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
-static void ci_hdrc_host_driver_init(void)
+static inline void ci_hdrc_host_driver_init(void)
{
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 12bb5722b420..6833c918abce 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -574,7 +574,7 @@ __acquires(ps->lock)
/* Now carefully unlink all the marked pending URBs */
rescan:
- list_for_each_entry(as, &ps->async_pending, asynclist) {
+ list_for_each_entry_reverse(as, &ps->async_pending, asynclist) {
if (as->bulk_status == AS_UNLINK) {
as->bulk_status = 0; /* Only once */
urb = as->urb;
@@ -636,7 +636,7 @@ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
spin_lock_irqsave(&ps->lock, flags);
while (!list_empty(list)) {
- as = list_entry(list->next, struct async, asynclist);
+ as = list_last_entry(list, struct async, asynclist);
list_del_init(&as->asynclist);
urb = as->urb;
usb_get_urb(urb);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9ae2a7a93df2..f0a259937da8 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = -EBUSY;
goto put_hcd;
}
- hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+ hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8c4e5adbf820..3405b146edc9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 6af6add3d4c0..876ff31261d5 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -288,14 +288,9 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
/*
* Need to schedule a work, as there are possible DELAY function calls.
- * Release lock before scheduling workq as it holds spinlock during
- * scheduling.
*/
- if (hsotg->wq_otg) {
- spin_unlock(&hsotg->lock);
+ if (hsotg->wq_otg)
queue_work(hsotg->wq_otg, &hsotg->wf_otg);
- spin_lock(&hsotg->lock);
- }
}
/**
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index b8f2790abf91..3a0dcbfbc827 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -183,6 +183,7 @@ DEFINE_SHOW_ATTRIBUTE(state);
static int fifo_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
+ int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 val;
int idx;
@@ -196,7 +197,7 @@ static int fifo_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ for (idx = 1; idx <= fifo_count; idx++) {
val = dwc2_readl(hsotg, DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6be10e496e10..88f7d6d4ff2d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3784,15 +3784,26 @@ irq_retry:
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
		/* Proceed only for unmasked EPs */
- if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+ if (BIT(idx) & ~daintmsk)
continue;
epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
- if (epctrl & DXEPCTL_EPENA) {
+		/* ISOC EPs only */
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+ continue;
+ }
+
+		/* Non-ISOC EPs */
+ if (hs_ep->halted) {
+ if (!(epctrl & DXEPCTL_EPENA))
+ epctrl |= DXEPCTL_EPENA;
+ epctrl |= DXEPCTL_EPDIS;
+ epctrl |= DXEPCTL_STALL;
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
}
}
@@ -4056,11 +4067,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
* a unique tx-fifo even if it is non-periodic.
*/
if (dir_in && hsotg->dedicated_fifos) {
+ unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 fifo_index = 0;
u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket * hs_ep->mc;
- for (i = 1; i < hsotg->num_of_eps; ++i) {
+ for (i = 1; i <= fifo_count; ++i) {
if (hsotg->fifo_map & (1 << i))
continue;
val = dwc2_readl(hsotg, DPTXFSIZN(i));
@@ -4310,19 +4322,20 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
epctl = dwc2_readl(hs, epreg);
if (value) {
- epctl |= DXEPCTL_STALL;
+ if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
+		/* STALL bit will be set in the GOUTNAKEFF interrupt handler */
} else {
epctl &= ~DXEPCTL_STALL;
xfertype = epctl & DXEPCTL_EPTYPE_MASK;
if (xfertype == DXEPCTL_EPTYPE_BULK ||
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
+ dwc2_writel(hs, epctl, epreg);
}
- dwc2_writel(hs, epctl, epreg);
}
hs_ep->halted = value;
-
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 81afe553aa66..b90f858af960 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2824,7 +2824,7 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->params.host_dma) {
+ if (hsotg->params.host_dma && chan->qh) {
if (hsotg->params.dma_desc_enable) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f561c6c9e8a9..1d85c42b9c67 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1246,6 +1246,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
/* do nothing */
break;
}
+
+ /* de-assert DRVVBUS for HOST and OTG mode */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}
static void dwc3_get_properties(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1c8b349379af..77c4a9abe365 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -688,7 +688,9 @@ struct dwc3_ep {
#define DWC3_EP_STALL BIT(1)
#define DWC3_EP_WEDGE BIT(2)
#define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_DELAY_START BIT(6)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index c1e9ea621f41..90bb022737da 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/**
- * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
+ * dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -255,4 +255,4 @@ module_platform_driver(dwc3_exynos_driver);
MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
+MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 294276f7deb9..7051611229c9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
@@ -342,6 +343,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fd1b100d2927..6dee4dabc0a4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1136,8 +1136,10 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ }
break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 154f3f3e8cff..1b8014ab0b25 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -57,7 +57,7 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
return -EINVAL;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
return 0;
}
@@ -111,6 +111,9 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ /* set no action before sending new link state change */
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -1447,6 +1450,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ /* Start the transfer only after the END_TRANSFER is completed */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ dep->flags |= DWC3_EP_DELAY_START;
+ return 0;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1828,7 +1837,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = false;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
@@ -2625,8 +2634,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
case DWC3_DEPEVT_STREAMEVT:
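Taken together with the DWC3_EP_END_TRANSFER_PENDING/DWC3_EP_DELAY_START flags added in core.h and the deferral in __dwc3_gadget_ep_queue(), the protocol is: a request queued while EndTransfer is outstanding is parked, and the command-complete event runs the deferred kick. A standalone model of the flag handshake (a condensed sketch of the hunks above, not the driver itself):

#include <stdio.h>

#define EP_TRANSFER_STARTED	(1u << 3)
#define EP_END_TRANSFER_PENDING	(1u << 4)
#define EP_DELAY_START		(1u << 6)

static void ep_queue(unsigned int *flags)
{
	if (*flags & EP_END_TRANSFER_PENDING)
		*flags |= EP_DELAY_START;	/* park the kick */
	else
		printf("kick transfer now\n");
}

static void end_transfer_complete(unsigned int *flags)
{
	*flags &= ~(EP_END_TRANSFER_PENDING | EP_TRANSFER_STARTED);
	/* the real handler also excludes isochronous endpoints */
	if (*flags & EP_DELAY_START)
		printf("kick deferred transfer\n");
	*flags &= ~EP_DELAY_START;
}

int main(void)
{
	unsigned int flags = EP_END_TRANSFER_PENDING;

	ep_queue(&flags);		/* deferred */
	end_transfer_complete(&flags);	/* runs the deferred kick */
	return 0;
}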
@@ -2678,12 +2693,12 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
- struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+ (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
@@ -2693,16 +2708,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* much trouble synchronizing between us and gadget driver.
*
* We have discussed this with the IP Provider and it was
- * suggested to giveback all requests here, but give HW some
- * extra time to synchronize with the interconnect. We're using
- * an arbitrary 100us delay for that.
+ * suggested to giveback all requests here.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
- * In short, what we're doing is:
- *
- * - Issue EndTransfer WITH CMDIOC bit set
- * - Wait 100us
+ * In short, what we're doing is issuing EndTransfer with
+	 * CMDIOC bit set and delaying the transfer kick until the
+	 * EndTransfer command has completed.
*
* As of IP version 3.10a of the DWC_usb3 IP, the controller
* supports a mode to work around the above limitation. The
@@ -2711,8 +2723,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* by writing GUCTL2[14]. This polling is already done in the
* dwc3_send_gadget_ep_cmd() function so if the mode is
* enabled, the EndTransfer command will have completed upon
- * returning from this function and we don't need to delay for
- * 100us.
+ * returning from this function.
*
* This mode is NOT available on the DWC_usb31 IP.
*/
@@ -2728,9 +2739,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
- udelay(100);
+ else
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -2759,12 +2769,12 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
+ dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc3_disconnect_gadget(dwc);
@@ -2816,7 +2826,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc->test_mode = false;
dwc3_clear_stall_all_ep(dwc);
@@ -2920,11 +2930,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
}
dep = dwc->eps[0];
@@ -3033,7 +3043,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
reg &= ~u1u2;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
break;
default:
/* do nothing */
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 5faf4d1249e0..fbc7d8013f0b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -127,4 +127,18 @@ static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
+/**
+ * dwc3_gadget_dctl_write_safe - write to DCTL safe from link state change
+ * @dwc: pointer to our context structure
+ * @value: value to write to DCTL
+ *
+ * Use this function when doing read-modify-write to DCTL. It will not
+ * send link state change request.
+ */
+static inline void dwc3_gadget_dctl_write_safe(struct dwc3 *dwc, u32 value)
+{
+ value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, value);
+}
+
#endif /* __DRIVERS_USB_DWC3_GADGET_H */
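A quick standalone check of what the helper guarantees: whatever stale ULSTCHNGREQ bits ride along in a read-modify-write, they are stripped before the register is written. The register is modelled as a plain variable, and the mask value below assumes the 0x0f << 5 definition from core.h:

#include <assert.h>
#include <stdint.h>

#define DWC3_DCTL_ULSTCHNGREQ_MASK	(0x0f << 5)
#define DWC3_DCTL_INITU1ENA		(1u << 10)

static uint32_t dctl;	/* stand-in for the DCTL register */

static void dctl_write_safe(uint32_t value)
{
	value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
	dctl = value;
}

int main(void)
{
	/* a stale link state change request rides along... */
	dctl_write_safe((0x5u << 5) | DWC3_DCTL_INITU1ENA);
	/* ...and is stripped, while the intended bit survives */
	assert(!(dctl & DWC3_DCTL_ULSTCHNGREQ_MASK));
	assert(dctl & DWC3_DCTL_INITU1ENA);
	return 0;
}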
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 5567ed2cddbe..fa252870c926 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
- props[prop_idx++].name = "usb3-lpm-capable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
- props[prop_idx++].name = "usb2-lpm-disable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
@@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
* This following flag tells XHCI to do just that.
*/
if (dwc->revision <= DWC3_REVISION_300A)
- props[prop_idx++].name = "quirk-broken-port-ped";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index cac991173ac0..971c6b92484a 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -971,7 +971,7 @@ static int __init xdbc_init(void)
goto free_and_quit;
}
- base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+ base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
if (!base) {
xdbc_trace("failed to remap the io address\n");
ret = -ENOMEM;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 02ff850278b1..c6db0a0a340c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -483,34 +483,6 @@ config USB_CONFIGFS_F_TCM
Both protocols can work on USB2.0 and USB3.0.
UAS utilizes the USB 3.0 feature called streams support.
-choice
- tristate "USB Gadget precomposed configurations"
- default USB_ETH
- optional
- help
- A Linux "Gadget Driver" talks to the USB Peripheral Controller
- driver through the abstract "gadget" API. Some other operating
- systems call these "client" drivers, of which "class drivers"
- are a subset (implementing a USB device class specification).
- A gadget driver implements one or more USB functions using
- the peripheral hardware.
-
- Gadget drivers are hardware-neutral, or "platform independent",
- except that they sometimes must understand quirks or limitations
- of the particular controllers they work with. For example, when
- a controller doesn't support alternate configurations or provide
- enough of the right types of endpoints, the gadget driver might
- not be able work with that controller, or might need to implement
- a less common variant of a device class protocol.
-
- The available choices each represent a single precomposed USB
- gadget configuration. In the device model, each option contains
- both the device instantiation as a child for a USB gadget
- controller, and the relevant drivers for each function declared
- by the device.
-
source "drivers/usb/gadget/legacy/Kconfig"
-endchoice
-
endif # USB_GADGET
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ab9ac48a751a..32b637e3e1fa 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -293,6 +293,47 @@ err:
return ret;
}
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+ char *page)
+{
+ enum usb_device_speed speed = to_gadget_info(item)->composite.max_speed;
+
+ return sprintf(page, "%s\n", usb_speed_string(speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+
+ mutex_lock(&gi->lock);
+
+	/* Prevent changing max_speed after the driver is bound */
+ if (gi->composite.gadget_driver.udc_name)
+ goto err;
+
+ if (strncmp(page, "super-speed-plus", 16) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
+ else if (strncmp(page, "super-speed", 11) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER;
+ else if (strncmp(page, "high-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_HIGH;
+ else if (strncmp(page, "full-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_FULL;
+ else if (strncmp(page, "low-speed", 9) == 0)
+ gi->composite.max_speed = USB_SPEED_LOW;
+ else
+ goto err;
+
+ gi->composite.gadget_driver.max_speed = gi->composite.max_speed;
+
+ mutex_unlock(&gi->lock);
+ return len;
+err:
+ mutex_unlock(&gi->lock);
+ return -EINVAL;
+}
+
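From user space the new attribute is written like any other gadget configfs file; the store handler matches on the string prefix. A minimal usage sketch, assuming a hypothetical gadget instance named g1 and configfs mounted at /sys/kernel/config:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/config/usb_gadget/g1/max_speed", "w");

	if (!f)
		return 1;
	/* matched by strncmp(page, "super-speed", 11) above */
	fputs("super-speed\n", f);
	return fclose(f) ? 1 : 0;
}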
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceSubClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceProtocol);
@@ -302,6 +343,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bDeviceClass,
@@ -313,6 +355,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bcdDevice,
&gadget_dev_desc_attr_bcdUSB,
&gadget_dev_desc_attr_UDC,
+ &gadget_dev_desc_attr_max_speed,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 460d5d7c984f..7f5cf488b2b1 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -52,6 +52,7 @@ struct f_ecm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
/* FIXME is_open needs some irq-ish locking
@@ -380,7 +381,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ecm->notify_count))
return;
event = req->buf;
@@ -420,10 +421,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ecm->ctrl_id);
- ecm->notify_req = NULL;
+ atomic_inc(&ecm->notify_count);
status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
if (status < 0) {
- ecm->notify_req = req;
+ atomic_dec(&ecm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -448,17 +449,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
case 0:
/* no fault */
+ atomic_dec(&ecm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ecm->notify_count, 0);
ecm->notify_state = ECM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ecm->notify_count);
break;
}
- ecm->notify_req = req;
ecm_do_notify(ecm);
}
@@ -907,6 +910,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
+ if (atomic_read(&ecm->notify_count)) {
+ usb_ep_dequeue(ecm->notify, ecm->notify_req);
+ atomic_set(&ecm->notify_count, 0);
+ }
+
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
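The same atomic accounting is applied to f_ncm below. The point of a counter over the old NULLed-pointer convention: the completion handler itself requeues notifications, so "in flight" must survive a requeue, and unbind can now tell whether a dequeue is still required. A standalone model of the pattern (a sketch only, single notify request assumed):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int notify_count;

static void do_notify(void)
{
	if (atomic_load(&notify_count))
		return;			/* notification already in flight */
	atomic_fetch_add(&notify_count, 1);
	printf("queued notification\n");	/* usb_ep_queue() here */
}

static void notify_complete(int status)
{
	if (status < 0)			/* -ECONNRESET / -ESHUTDOWN */
		atomic_store(&notify_count, 0);
	else
		atomic_fetch_sub(&notify_count, 1);
	do_notify();			/* may immediately requeue */
}

int main(void)
{
	do_notify();
	notify_complete(0);
	return 0;
}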
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0bbccac94d6c..6f8b67e61771 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1062,6 +1062,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
@@ -1105,6 +1106,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 2d6e76e4cffa..1d900081b1f0 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -53,6 +53,7 @@ struct f_ncm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
const struct ndp_parser_opts *parser_opts;
@@ -547,7 +548,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ncm->notify_count))
return;
event = req->buf;
@@ -587,7 +588,8 @@ static void ncm_do_notify(struct f_ncm *ncm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ncm->ctrl_id);
- ncm->notify_req = NULL;
+ atomic_inc(&ncm->notify_count);
+
/*
	 * In double buffering, if there is space in the FIFO,
* completion callback can be called right after the call,
@@ -597,7 +599,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
spin_lock(&ncm->lock);
if (status < 0) {
- ncm->notify_req = req;
+ atomic_dec(&ncm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -632,17 +634,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
case 0:
VDBG(cdev, "Notification %02x sent\n",
event->bNotificationType);
+ atomic_dec(&ncm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ncm->notify_count, 0);
ncm->notify_state = NCM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ncm->notify_count);
break;
}
- ncm->notify_req = req;
ncm_do_notify(ncm);
spin_unlock(&ncm->lock);
}
@@ -1649,6 +1653,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
+ if (atomic_read(&ncm->notify_count)) {
+ usb_ep_dequeue(ncm->notify, ncm->notify_req);
+ atomic_set(&ncm->notify_count, 0);
+ }
+
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 7ec6a996af26..6d956f190f5a 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -239,18 +239,6 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
-static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
-}
-
-static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -326,9 +314,6 @@ static int uac_pcm_null(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops uac_pcm_ops = {
.open = uac_pcm_open,
.close = uac_pcm_null,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uac_pcm_hw_params,
- .hw_free = uac_pcm_hw_free,
.trigger = uac_pcm_trigger,
.pointer = uac_pcm_pointer,
.prepare = uac_pcm_null,
@@ -422,7 +407,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
struct usb_ep *ep;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
- unsigned int factor, rate;
+ unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
@@ -441,13 +426,15 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- rate = params->p_srate * uac->p_framesize;
uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+ uac->p_pktsize = min_t(unsigned int,
+ uac->p_framesize *
+ (params->p_srate / uac->p_interval),
prm->max_psize);
if (uac->p_pktsize < prm->max_psize)
- uac->p_pktsize_residue = rate % uac->p_interval;
+ uac->p_pktsize_residue = uac->p_framesize *
+ (params->p_srate % uac->p_interval);
else
uac->p_pktsize_residue = 0;
@@ -584,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
strlcpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- NULL, 0, BUFF_SIZE_MAX);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
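
The u_audio arithmetic change above is about frame alignment: the old code divided the byte rate (srate * framesize) by the interval, which can land mid-frame, while the new code divides the sample rate first so the packet size is always a whole number of frames. A small user-space illustration with made-up numbers (44.1 kHz, 4 bytes per frame, 8 packets per second-unit):

#include <stdio.h>

int main(void)
{
	unsigned int srate = 44100, framesize = 4, interval = 8;
	unsigned int rate = srate * framesize;

	/* old: 22050 bytes -- not a multiple of the 4-byte frame size */
	printf("old: pktsize=%u residue=%u\n", rate / interval, rate % interval);

	/* new: 22048 bytes, frame-aligned, remainder carried in frames */
	printf("new: pktsize=%u residue=%u\n",
	       framesize * (srate / interval),
	       framesize * (srate % interval));
	return 0;
}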
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 119a4e47681f..6e7e1a9202e6 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -14,6 +14,32 @@
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
+choice
+ tristate "USB Gadget precomposed configurations"
+ default USB_ETH
+ optional
+ help
+ A Linux "Gadget Driver" talks to the USB Peripheral Controller
+ driver through the abstract "gadget" API. Some other operating
+ systems call these "client" drivers, of which "class drivers"
+ are a subset (implementing a USB device class specification).
+ A gadget driver implements one or more USB functions using
+ the peripheral hardware.
+
+ Gadget drivers are hardware-neutral, or "platform independent",
+ except that they sometimes must understand quirks or limitations
+ of the particular controllers they work with. For example, when
+ a controller doesn't support alternate configurations or provide
+ enough of the right types of endpoints, the gadget driver might
+ not be able to work with that controller, or might need to implement
+ a less common variant of a device class protocol.
+
+ The available choices each represent a single precomposed USB
+ gadget configuration. In the device model, each option contains
+ both the device instantiation as a child for a USB gadget
+ controller, and the relevant drivers for each function declared
+ by the device.
+
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -489,3 +515,5 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
+
+endchoice
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index da1c37933ca1..8d7a556ece30 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -225,7 +225,7 @@ static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = cdc_bind,
.unbind = cdc_unbind,
};
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b640ed3fcf70..ae6d8f7092b8 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -149,7 +149,7 @@ static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gfs_bind,
.unbind = gfs_unbind,
};
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 50515f9e1022..ec9749845660 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -482,7 +482,7 @@ static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = multi_bind,
.unbind = multi_unbind,
.needs_serial = 1,
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 8465f081e921..c61e71ba7045 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -197,7 +197,7 @@ static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gncm_bind,
.unbind = gncm_unbind,
};
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57b6f66331cf..bfd1c9e80a1f 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -116,7 +116,7 @@ static int udc_pci_probe(
goto err_memreg;
}
- dev->virt_addr = ioremap_nocache(resource, len);
+ dev->virt_addr = ioremap(resource, len);
if (!dev->virt_addr) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
retval = -EFAULT;
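
ioremap_nocache() has long been a plain alias for ioremap(), which is why this rename (repeated across the UDC and host drivers below) is purely mechanical. For reference, a sketch of the claim/map/unwind pattern these probe paths share; resource, len and the error codes mirror the code above:

	void __iomem *base;

	if (!request_mem_region(resource, len, "my-driver"))
		return -EBUSY;

	base = ioremap(resource, len);		/* MMIO mappings are uncached */
	if (!base) {
		release_mem_region(resource, len);
		return -EFAULT;
	}
	/* ... access registers via readl(base + off) / writel(val, base + off),
	 * then iounmap(base) and release_mem_region() on teardown ... */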
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 8a42768e3213..6e0432141c40 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1122,7 +1122,7 @@ static struct usb_endpoint_descriptor usba_ep0_desc = {
.bInterval = 1,
};
-static struct usb_gadget usba_gadget_template = {
+static const struct usb_gadget usba_gadget_template = {
.ops = &usba_udc_ops,
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 51fa614b4079..9b11046480fe 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1414,6 +1414,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
}
mutex_unlock(&udc_lock);
+ if (ret)
+ pr_warn("udc-core: couldn't find an available UDC or it's busy\n");
return ret;
found:
ret = udc_bind_to_driver(udc, driver);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index c3721225b61e..4a46f661d0e4 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->got_region = 1;
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 64d80c65bb96..aaf975c809bf 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2175,8 +2175,6 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spin_lock(&dev->lock);
-
/* Inside lock so that no gadget can use this udc until probe is done */
retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
if (retval) {
@@ -2185,15 +2183,21 @@ static int gr_probe(struct platform_device *pdev)
}
dev->added = 1;
+ spin_lock(&dev->lock);
+
retval = gr_udc_init(dev);
- if (retval)
+ if (retval) {
+ spin_unlock(&dev->lock);
goto out;
-
- gr_dfs_create(dev);
+ }
/* Clear all interrupt enables that might be left on since last boot */
gr_disable_interrupts_and_pullup(dev);
+ spin_unlock(&dev->lock);
+
+ gr_dfs_create(dev);
+
retval = gr_request_irq(dev, dev->irq);
if (retval) {
dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
@@ -2222,8 +2226,6 @@ static int gr_probe(struct platform_device *pdev)
dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
out:
- spin_unlock(&dev->lock);
-
if (retval)
gr_remove(pdev);
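
The gr_udc reordering above narrows the spinlock so the sleeping calls (usb_add_gadget_udc(), gr_dfs_create(), gr_request_irq()) run outside it, while gr_udc_init() and the interrupt disable stay under it. Condensed, the resulting probe sequence looks like this (error unwinding elided; names as in the driver):

	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);	/* may sleep */
	if (retval)
		goto out;
	dev->added = 1;

	spin_lock(&dev->lock);
	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}
	gr_disable_interrupts_and_pullup(dev);
	spin_unlock(&dev->lock);

	gr_dfs_create(dev);					/* may sleep */
	retval = gr_request_irq(dev, dev->irq);			/* may sleep */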
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 247de0faaeb7..a8273b589456 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev)
ret = -EBUSY;
goto err;
}
- dev->base_addr = ioremap_nocache(base, len);
+ dev->base_addr = ioremap(base, len);
if (!dev->base_addr) {
dev_dbg(dev->dev, "can't map memory\n");
ret = -EFAULT;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 51efee21915f..1fd1b9186e46 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* 8051 code into the chip, e.g. to turn on PCI PM.
*/
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
ep_dbg(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f36f0730afab..bd12417996db 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2757,7 +2757,7 @@ static int omap_udc_probe(struct platform_device *pdev)
/* NOTE: "knows" the order of the resources! */
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
+ resource_size(&pdev->resource[0]),
driver_name)) {
DBG("request_mem_region failed\n");
return -EBUSY;
@@ -2934,7 +2934,7 @@ cleanup0:
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return status;
}
@@ -2950,7 +2950,7 @@ static int omap_udc_remove(struct platform_device *pdev)
wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return 0;
}
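
resource_size() from <linux/ioport.h> evaluates to exactly the end - start + 1 expression it replaces here (resource bounds are inclusive), so the conversion removes a recurring off-by-one hazard rather than changing behaviour. A tiny illustration with a hypothetical 4 KiB region:

	struct resource res = {
		.start = 0xfff54000,
		.end   = 0xfff54fff,	/* inclusive upper bound */
	};

	/* resource_size(&res) == 0xfff54fff - 0xfff54000 + 1 == 0x1000 */
	if (!request_mem_region(res.start, resource_size(&res), "my-driver"))
		return -EBUSY;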
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d730180db06..55bdfdf11e4c 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -186,7 +186,7 @@ config USB_EHCI_FSL
config USB_EHCI_MXC
tristate "Support for Freescale i.MX on-chip EHCI USB controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
@@ -210,8 +210,8 @@ config USB_EHCI_HCD_OMAP
config USB_EHCI_HCD_ORION
tristate "Support for Marvell EBU on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
- default y
+ depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST)
+ default y if (PLAT_ORION || ARCH_MVEBU)
---help---
Enables support for the on-chip EHCI controller on Marvell's
embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
@@ -221,15 +221,15 @@ config USB_EHCI_HCD_ORION
config USB_EHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_EHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip EHCI controller on
ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_EHCI_HCD_PLATFORM
help
@@ -238,8 +238,8 @@ config USB_EHCI_HCD_STI
config USB_EHCI_HCD_AT91
tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
+ depends on USB_EHCI_HCD && (ARCH_AT91 || COMPILE_TEST)
+ default y if ARCH_AT91
---help---
Enables support for the on-chip EHCI controller on
Atmel chips.
@@ -263,20 +263,20 @@ config USB_EHCI_HCD_PPC_OF
config USB_EHCI_SH
bool "EHCI support for SuperH USB controller"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
---help---
Enables support for the on-chip EHCI controller on the SuperH.
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "EHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
- depends on (ARCH_PXA || ARCH_MMP)
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Enables support for Marvell (including PXA and MMP series) on-chip
@@ -289,7 +289,7 @@ config USB_EHCI_MV
config USB_CNS3XXX_EHCI
bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_EHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -410,15 +410,15 @@ config USB_OHCI_HCD_OMAP1
config USB_OHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_OHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip OHCI controller on
ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_OHCI_HCD_PLATFORM
help
@@ -427,8 +427,8 @@ config USB_OHCI_HCD_STI
config USB_OHCI_HCD_S3C2410
tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX || COMPILE_TEST)
+ default y if (ARCH_S3C24XX || ARCH_S3C64XX)
---help---
Enables support for the on-chip OHCI controller on
S3C24xx/S3C64xx chips.
@@ -453,17 +453,17 @@ config USB_OHCI_HCD_PXA27X
config USB_OHCI_HCD_AT91
tristate "Support for Atmel on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && ARCH_AT91 && OF
- default y
+ depends on USB_OHCI_HCD && (ARCH_AT91 || COMPILE_TEST) && OF
+ default y if ARCH_AT91
---help---
Enables support for the on-chip OHCI controller on
Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
- depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+ depends on ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
- default y
+ default y if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
help
This option is deprecated now and the driver was removed, use
USB_OHCI_HCD_PLATFORM instead.
@@ -473,10 +473,10 @@ config USB_OHCI_HCD_OMAP3
config USB_OHCI_HCD_DAVINCI
tristate "OHCI support for TI DaVinci DA8xx"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
depends on USB_OHCI_HCD
select PHY_DA8XX_USB
- default y
+ default y if ARCH_DAVINCI_DA8XX
help
Enables support for the DaVinci DA8xx integrated OHCI
controller. This driver cannot currently be a loadable
@@ -532,7 +532,7 @@ config USB_OHCI_HCD_SSB
config USB_OHCI_SH
bool "OHCI support for SuperH USB controller (DEPRECATED)"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -542,14 +542,14 @@ config USB_OHCI_SH
If you use the PCI OHCI controller, this option is not necessary.
config USB_OHCI_EXYNOS
- tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "OHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 01debfd03d4a..a4e9abcbdc4f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * SAMSUNG EXYNOS USB HOST EHCI Controller
+ * Samsung Exynos USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -21,7 +21,7 @@
#include "ehci.h"
-#define DRIVER_DESC "EHCI EXYNOS driver"
+#define DRIVER_DESC "EHCI Exynos driver"
#define EHCI_INSNREG00(base) (base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 66ec1fdf9fe7..bd4f6ef534d9 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/usb/otg.h>
+#include <linux/usb/of.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/io.h>
@@ -67,6 +68,8 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 status;
int retval;
if (ehci_mv == NULL) {
@@ -80,6 +83,14 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
if (retval)
dev_err(dev, "ehci_setup failed %d\n", retval);
+ if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+ status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ status |= PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ status &= ~PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ }
+
return retval;
}
@@ -116,7 +127,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci_mv->set_vbus = pdata->set_vbus;
}
- ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ ehci_mv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(ehci_mv->phy)) {
retval = PTR_ERR(ehci_mv->phy);
if (retval != -EPROBE_DEFER)
@@ -164,7 +175,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
}
ehci = hcd_to_ehci(hcd);
- ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
@@ -246,10 +257,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
MODULE_ALIAS("mv-ehci");
static const struct platform_device_id ehci_id_table[] = {
- {"pxa-u2oehci", PXA_U2OEHCI},
- {"pxa-sph", PXA_SPH},
- {"mmp3-hsic", MMP3_HSIC},
- {"mmp3-fsic", MMP3_FSIC},
+ {"pxa-u2oehci", 0},
+ {"pxa-sph", 0},
{},
};
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index a2b610dbedfc..2d462fbbe0a6 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
if (!request_mem_region(res->start, res_len, "mab regs"))
return -EBUSY;
- dev->mab_regs = ioremap_nocache(res->start, res_len);
+ dev->mab_regs = ioremap(res->start, res_len);
if (dev->mab_regs == NULL) {
retval = -ENOMEM;
goto err1;
@@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
retval = -EBUSY;
goto err2;
}
- dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ dev->usbid_regs = ioremap(res->start, res_len);
if (dev->usbid_regs == NULL) {
retval = -ENOMEM;
goto err3;
@@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
retval = -EBUSY;
goto err1;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
pr_debug("ioremap failed");
retval = -ENOMEM;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 2afde14dc425..c25c51d26f26 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -8,7 +8,6 @@
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/platform_data/ehci-sh.h>
struct ehci_sh_priv {
struct clk *iclk, *fclk;
@@ -76,7 +75,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct ehci_sh_priv *priv;
- struct ehci_sh_platdata *pdata;
struct usb_hcd *hcd;
int irq, ret;
@@ -89,8 +87,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- pdata = dev_get_platdata(&pdev->dev);
-
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
@@ -127,9 +123,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
clk_enable(priv->fclk);
clk_enable(priv->iclk);
- if (pdata && pdata->phy_init)
- pdata->phy_init();
-
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd");
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4d2cdec4cb78..d6433f206c17 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -42,12 +42,10 @@ struct tegra_ehci_soc_config {
};
struct tegra_ehci_hcd {
- struct tegra_usb_phy *phy;
struct clk *clk;
struct reset_control *rst;
int port_resuming;
bool needs_double_reset;
- enum tegra_usb_phy_port_speed port_speed;
};
static int tegra_reset_usb_controller(struct platform_device *pdev)
@@ -480,12 +478,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
u_phy->otg->host = hcd_to_bus(hcd);
- err = usb_phy_set_suspend(hcd->usb_phy, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to power on the phy\n");
- goto cleanup_phy;
- }
-
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
@@ -521,16 +513,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ usb_remove_hcd(hcd);
otg_set_host(hcd->usb_phy->otg, NULL);
-
usb_phy_shutdown(hcd->usb_phy);
- usb_remove_hcd(hcd);
-
- reset_control_assert(tegra->rst);
- udelay(1);
-
clk_disable_unprepare(tegra->clk);
-
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index d5ce98e205c7..bd40e597f256 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -19,7 +19,7 @@
#include "ohci.h"
-#define DRIVER_DESC "OHCI EXYNOS driver"
+#define DRIVER_DESC "OHCI Exynos driver"
static const char hcd_name[] = "ohci-exynos";
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index fe09b8626329..120666a0d590 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2783,11 +2783,15 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
- for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
- (void) oxu_hub_control(oxu_to_hcd(oxu),
- is_on ? SetPortFeature : ClearPortFeature,
- USB_PORT_FEAT_POWER,
- port--, NULL, 0);
+ for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
+ if (is_on)
+ oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ else
+ oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ }
+
msleep(20);
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 6c7f0a876b96..beb2efa71341 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+ base = ioremap(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b18a6baef204..bfbdb3ceed29 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -488,11 +488,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto disable_clk;
}
- /* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- goto disable_clk;
-
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bf9065438320..8163aefc6c6b 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -38,7 +39,15 @@
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_16 0x040
+#define XUSB_CFG_24 0x060
+#define XUSB_CFG_AXI_CFG 0x0f8
#define XUSB_CFG_ARU_C11_CSBRANGE 0x41c
+#define XUSB_CFG_ARU_CONTEXT 0x43c
+#define XUSB_CFG_ARU_CONTEXT_HS_PLS 0x478
+#define XUSB_CFG_ARU_CONTEXT_FS_PLS 0x47c
+#define XUSB_CFG_ARU_CONTEXT_HSFS_SPEED 0x480
+#define XUSB_CFG_ARU_CONTEXT_HSFS_PP 0x484
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
@@ -62,11 +71,20 @@
#define MBOX_SMI_INTR_EN BIT(3)
/* IPFS registers */
+#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
+#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
+#define IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0 0x0c8
+#define IPFS_XUSB_HOST_MSI_VEC0_0 0x100
+#define IPFS_XUSB_HOST_MSI_EN_VEC0_0 0x140
#define IPFS_XUSB_HOST_CONFIGURATION_0 0x180
#define IPFS_EN_FPCI BIT(0)
+#define IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0 0x184
#define IPFS_XUSB_HOST_INTR_MASK_0 0x188
#define IPFS_IP_INT_MASK BIT(16)
+#define IPFS_XUSB_HOST_INTR_ENABLE_0 0x198
+#define IPFS_XUSB_HOST_UFPCI_CONFIG_0 0x19c
#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0 0x1bc
+#define IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0 0x1dc
#define CSB_PAGE_SELECT_MASK 0x7fffff
#define CSB_PAGE_SELECT_SHIFT 9
@@ -101,6 +119,8 @@
#define L2IMEMOP_ACTION_SHIFT 24
#define L2IMEMOP_INVALIDATE_ALL (0x40 << L2IMEMOP_ACTION_SHIFT)
#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101a18
+#define L2IMEMOP_RESULT_VLD BIT(31)
#define XUSB_CSB_MP_APMAP 0x10181c
#define APMAP_BOOTPATH BIT(31)
@@ -145,19 +165,32 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
-struct tega_xusb_mbox_regs {
+struct tegra_xusb_mbox_regs {
u16 cmd;
u16 data_in;
u16 data_out;
u16 owner;
};
+struct tegra_xusb_context_soc {
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } ipfs;
+
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } fpci;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
unsigned int num_supplies;
const struct tegra_xusb_phy_type *phy_types;
unsigned int num_types;
+ const struct tegra_xusb_context_soc *context;
struct {
struct {
@@ -166,12 +199,17 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
- struct tega_xusb_mbox_regs mbox;
+ struct tegra_xusb_mbox_regs mbox;
bool scale_ss_clock;
bool has_ipfs;
};
+struct tegra_xusb_context {
+ u32 *ipfs;
+ u32 *fpci;
+};
+
struct tegra_xusb {
struct device *dev;
void __iomem *regs;
@@ -218,6 +256,8 @@ struct tegra_xusb {
void *virt;
dma_addr_t phys;
} fw;
+
+ struct tegra_xusb_context context;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -623,9 +663,9 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void tegra_xusb_config(struct tegra_xusb *tegra,
- struct resource *regs)
+static void tegra_xusb_config(struct tegra_xusb *tegra)
{
+ u32 regs = tegra->hcd->rsrc_start;
u32 value;
if (tegra->soc->has_ipfs) {
@@ -639,7 +679,7 @@ static void tegra_xusb_config(struct tegra_xusb *tegra,
/* Program BAR0 space */
value = fpci_readl(tegra, XUSB_CFG_4);
value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
- value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+ value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
usleep_range(100, 200);
@@ -793,17 +833,34 @@ disable_clk:
return err;
}
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+
+ tegra->context.ipfs = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.ipfs)
+ return -ENOMEM;
+
+ tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.fpci)
+ return -ENOMEM;
+
+ return 0;
+}
+#else
+static inline int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ return 0;
+}
+#endif
+
+static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
{
- unsigned int code_tag_blocks, code_size_blocks, code_blocks;
struct tegra_xusb_fw_header *header;
- struct device *dev = tegra->dev;
const struct firmware *fw;
- unsigned long timeout;
- time64_t timestamp;
- struct tm time;
- u64 address;
- u32 value;
int err;
err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
@@ -828,6 +885,26 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
release_firmware(fw);
+ return 0;
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+ struct xhci_cap_regs __iomem *cap = tegra->regs;
+ struct tegra_xusb_fw_header *header;
+ struct device *dev = tegra->dev;
+ struct xhci_op_regs __iomem *op;
+ unsigned long timeout;
+ time64_t timestamp;
+ struct tm time;
+ u64 address;
+ u32 value;
+ int err;
+
+ header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+ op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
csb_readl(tegra, XUSB_FALC_CPUCTL));
@@ -882,26 +959,37 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
csb_writel(tegra, 0, XUSB_FALC_DMACTL);
- msleep(50);
+ /* wait for RESULT_VLD to get set */
+#define tegra_csb_readl(offset) csb_readl(tegra, offset)
+ err = readx_poll_timeout(tegra_csb_readl,
+ XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT, value,
+ value & L2IMEMOP_RESULT_VLD, 100, 10000);
+ if (err < 0) {
+ dev_err(dev, "DMA controller not ready %#010x\n", value);
+ return err;
+ }
+#undef tegra_csb_readl
csb_writel(tegra, le32_to_cpu(header->boot_codetag),
XUSB_FALC_BOOTVEC);
- /* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
- timeout = jiffies + msecs_to_jiffies(5);
-
+ /* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
- while (time_before(jiffies, timeout)) {
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+ timeout = jiffies + msecs_to_jiffies(200);
+
+ do {
+ value = readl(&op->status);
+ if ((value & STS_CNR) == 0)
break;
- usleep_range(100, 200);
- }
+ usleep_range(1000, 2000);
+ } while (time_is_after_jiffies(timeout));
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
- dev_err(dev, "Falcon failed to start, state: %#x\n",
- csb_readl(tegra, XUSB_FALC_CPUCTL));
+ value = readl(&op->status);
+ if (value & STS_CNR) {
+ value = csb_readl(tegra, XUSB_FALC_CPUCTL);
+ dev_err(dev, "XHCI controller not read: %#010x\n", value);
return -EIO;
}
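
The readx_poll_timeout() helper from <linux/iopoll.h> expands to val = op(addr) inside a poll loop, i.e. it expects a single-argument accessor; that is the only reason for the short-lived tegra_csb_readl() wrapper macro above. The same trick against a hypothetical two-argument accessor my_readl(dev, offset):

	/* my_readl()/MY_STATUS/MY_READY are illustrative names only */
#define my_poll_readl(offset) my_readl(dev, offset)
	err = readx_poll_timeout(my_poll_readl, MY_STATUS, value,
				 value & MY_READY, 100, 10000);
#undef my_poll_readl
	if (err < 0)
		return err;	/* -ETIMEDOUT if MY_READY never appeared */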
@@ -966,11 +1054,37 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
return 0;
}
-static int tegra_xusb_probe(struct platform_device *pdev)
+static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *regs;
+ int err;
+
+ /* Enable firmware messages from controller. */
+ msg.cmd = MBOX_CMD_MSG_ENABLED;
+ msg.data = 0;
+
+ err = tegra_xusb_mbox_send(tegra, &msg);
+ if (err < 0)
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+
+ return err;
+}
+
+static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
+{
+ int err;
+
+ mutex_lock(&tegra->lock);
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ mutex_unlock(&tegra->lock);
+
+ return err;
+}
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
struct tegra_xusb *tegra;
+ struct resource *regs;
struct xhci_hcd *xhci;
unsigned int i, j, k;
struct phy *phy;
@@ -986,6 +1100,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
mutex_init(&tegra->lock);
tegra->dev = &pdev->dev;
+ err = tegra_xusb_init_context(tegra);
+ if (err < 0)
+ return err;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(tegra->regs))
@@ -1173,6 +1291,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
+ tegra->hcd->regs = tegra->regs;
+ tegra->hcd->rsrc_start = regs->start;
+ tegra->hcd->rsrc_len = resource_size(regs);
+
/*
* This must happen after usb_create_hcd(), because usb_create_hcd()
* will overwrite the drvdata of the device with the hcd it creates.
@@ -1185,19 +1307,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_hcd;
}
- pm_runtime_enable(&pdev->dev);
- if (pm_runtime_enabled(&pdev->dev))
- err = pm_runtime_get_sync(&pdev->dev);
- else
- err = tegra_xusb_runtime_resume(&pdev->dev);
-
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_phy;
- }
-
- tegra_xusb_config(tegra, regs);
-
/*
* The XUSB Falcon microcontroller can only address 40 bits, so set
* the DMA mask accordingly.
@@ -1205,19 +1314,35 @@ static int tegra_xusb_probe(struct platform_device *pdev)
err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
- goto put_rpm;
+ goto disable_phy;
+ }
+
+ err = tegra_xusb_request_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (!pm_runtime_enabled(&pdev->dev))
+ err = tegra_xusb_runtime_resume(&pdev->dev);
+ else
+ err = pm_runtime_get_sync(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+ goto free_firmware;
}
+ tegra_xusb_config(tegra);
+
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto put_rpm;
}
- tegra->hcd->regs = tegra->regs;
- tegra->hcd->rsrc_start = regs->start;
- tegra->hcd->rsrc_len = resource_size(regs);
-
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
@@ -1244,21 +1369,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_usb3;
}
- mutex_lock(&tegra->lock);
-
- /* Enable firmware messages from controller. */
- msg.cmd = MBOX_CMD_MSG_ENABLED;
- msg.data = 0;
-
- err = tegra_xusb_mbox_send(tegra, &msg);
+ err = tegra_xusb_enable_firmware_messages(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
- mutex_unlock(&tegra->lock);
goto remove_usb3;
}
- mutex_unlock(&tegra->lock);
-
err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
tegra_xusb_mbox_irq,
tegra_xusb_mbox_thread, 0,
@@ -1281,6 +1397,9 @@ put_rpm:
tegra_xusb_runtime_suspend(&pdev->dev);
put_hcd:
usb_put_hcd(tegra->hcd);
+free_firmware:
+ dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+ tegra->fw.phys);
disable_phy:
tegra_xusb_phy_disable(tegra);
pm_runtime_disable(&pdev->dev);
@@ -1328,22 +1447,176 @@ static int tegra_xusb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
+{
+ struct device *dev = hub->hcd->self.controller;
+ bool status = true;
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < hub->num_ports; i++) {
+ value = readl(hub->ports[i]->addr);
+ if ((value & PORT_PE) == 0)
+ continue;
+
+ if ((value & PORT_PLS_MASK) != XDEV_U3) {
+ dev_info(dev, "%u-%u isn't suspended: %#010x\n",
+ hub->hcd->self.busnum, i + 1, value);
+ status = false;
+ }
+ }
+
+ return status;
+}
+
+static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
+ !xhci_hub_ports_suspended(&xhci->usb3_rhub))
+ err = -EBUSY;
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return err;
+}
+
+static void tegra_xusb_save_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ctx->ipfs[i] = ipfs_readl(tegra, soc->ipfs.offsets[i]);
+ }
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ ctx->fpci[i] = fpci_readl(tegra, soc->fpci.offsets[i]);
+ }
+}
+
+static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ fpci_writel(tegra, ctx->fpci[i], soc->fpci.offsets[i]);
+ }
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ipfs_writel(tegra, ctx->ipfs[i], soc->ipfs.offsets[i]);
+ }
+}
+
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_check_ports(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "not all ports suspended: %d\n", err);
+ return err;
+ }
+
+ err = xhci_suspend(xhci, wakeup);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
+ return err;
+ }
+
+ tegra_xusb_save_context(tegra);
+ tegra_xusb_phy_disable(tegra);
+ tegra_xusb_clk_disable(tegra);
+
+ return 0;
+}
+
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_clk_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable PHYs: %d\n", err);
+ goto disable_clk;
+ }
+
+ tegra_xusb_config(tegra);
+ tegra_xusb_restore_context(tegra);
+
+ err = tegra_xusb_load_firmware(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to load firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = xhci_resume(xhci, true);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
+ goto disable_phy;
+ }
+
+ return 0;
+
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+disable_clk:
+ tegra_xusb_clk_disable(tegra);
+ return err;
+}
+
static int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
bool wakeup = device_may_wakeup(dev);
+ int err;
+
+ synchronize_irq(tegra->mbox_irq);
- /* TODO: Powergate controller across suspend/resume. */
- return xhci_suspend(xhci, wakeup);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_enter_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
static int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ bool wakeup = device_may_wakeup(dev);
+ int err;
- return xhci_resume(xhci, 0);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_exit_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
#endif
@@ -1370,12 +1643,50 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
{ .name = "hsic", .num = 2, },
};
+static const unsigned int tegra124_xusb_context_ipfs[] = {
+ IPFS_XUSB_HOST_MSI_BAR_SZ_0,
+ IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_VEC0_0,
+ IPFS_XUSB_HOST_MSI_EN_VEC0_0,
+ IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0,
+ IPFS_XUSB_HOST_INTR_MASK_0,
+ IPFS_XUSB_HOST_INTR_ENABLE_0,
+ IPFS_XUSB_HOST_UFPCI_CONFIG_0,
+ IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0,
+ IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0,
+};
+
+static const unsigned int tegra124_xusb_context_fpci[] = {
+ XUSB_CFG_ARU_CONTEXT_HS_PLS,
+ XUSB_CFG_ARU_CONTEXT_FS_PLS,
+ XUSB_CFG_ARU_CONTEXT_HSFS_SPEED,
+ XUSB_CFG_ARU_CONTEXT_HSFS_PP,
+ XUSB_CFG_ARU_CONTEXT,
+ XUSB_CFG_AXI_CFG,
+ XUSB_CFG_24,
+ XUSB_CFG_16,
+};
+
+static const struct tegra_xusb_context_soc tegra124_xusb_context = {
+ .ipfs = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_ipfs),
+ .offsets = tegra124_xusb_context_ipfs,
+ },
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_supply_names),
.phy_types = tegra124_phy_types,
.num_types = ARRAY_SIZE(tegra124_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 6, .count = 2, },
@@ -1414,6 +1725,7 @@ static const struct tegra_xusb_soc tegra210_soc = {
.num_supplies = ARRAY_SIZE(tegra210_supply_names),
.phy_types = tegra210_phy_types,
.num_types = ARRAY_SIZE(tegra210_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 8, .count = 1, },
@@ -1432,6 +1744,7 @@ MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
static const char * const tegra186_supply_names[] = {
};
+MODULE_FIRMWARE("nvidia/tegra186/xusb.bin");
static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "usb3", .num = 3, },
@@ -1439,12 +1752,20 @@ static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "hsic", .num = 1, },
};
+static const struct tegra_xusb_context_soc tegra186_xusb_context = {
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra186_soc = {
.firmware = "nvidia/tegra186/xusb.bin",
.supply_names = tegra186_supply_names,
.num_supplies = ARRAY_SIZE(tegra186_supply_names),
.phy_types = tegra186_phy_types,
.num_types = ARRAY_SIZE(tegra186_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 3, },
.usb2 = { .offset = 3, .count = 3, },
@@ -1474,6 +1795,7 @@ static const struct tegra_xusb_soc tegra194_soc = {
.num_supplies = ARRAY_SIZE(tegra194_supply_names),
.phy_types = tegra194_phy_types,
.num_types = ARRAY_SIZE(tegra194_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 4, },
.usb2 = { .offset = 4, .count = 4, },
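
The context tables above follow the usual per-SoC const data idiom: each SoC descriptor points at shared offset arrays and sizes them with ARRAY_SIZE(), so Tegra210 simply reuses the Tegra124 tables while Tegra186/194 omit the IPFS block (its num_offsets stays zero and the save/restore loops skip it). Reduced to its shape, with hypothetical names:

static const unsigned int foo_ctx_regs[] = { 0x10, 0x14, 0x18 };

struct ctx_soc {
	const unsigned int *offsets;
	unsigned int num_offsets;
};

static const struct ctx_soc foo_context = {
	.offsets = foo_ctx_regs,
	.num_offsets = ARRAY_SIZE(foo_ctx_regs),
};

static const struct ctx_soc bar_context = {
	/* no table: num_offsets is zero-initialized, loops do nothing */
};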
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 07cc82ff327c..ccd30f835888 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
}
/* map available memory */
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "Error ioremap failed\n");
release_mem_region(mem_start, mem_length);
@@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
return -EBUSY;
}
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "ioremap #1\n");
release_mem_region(mem_start, mem_length);
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 72f39a9751b5..116bd789e568 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -7,11 +7,10 @@
#include <linux/clk.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
#include <linux/regmap.h>
@@ -47,19 +46,19 @@ struct usb3503 {
struct device *dev;
struct clk *clk;
u8 port_off_mask;
- int gpio_intn;
- int gpio_reset;
- int gpio_connect;
+ struct gpio_desc *intn;
+ struct gpio_desc *reset;
+ struct gpio_desc *connect;
bool secondary_ref_clk;
};
static int usb3503_reset(struct usb3503 *hub, int state)
{
- if (!state && gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 0);
+ if (!state && hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 0);
- if (gpio_is_valid(hub->gpio_reset))
- gpio_set_value_cansleep(hub->gpio_reset, state);
+ if (hub->reset)
+ gpiod_set_value_cansleep(hub->reset, !state);
/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
if (state)
@@ -115,8 +114,8 @@ static int usb3503_connect(struct usb3503 *hub)
}
}
- if (gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 1);
+ if (hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 1);
hub->mode = USB3503_MODE_HUB;
dev_info(dev, "switched to HUB mode\n");
@@ -163,13 +162,11 @@ static int usb3503_probe(struct usb3503 *hub)
int err;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
+ enum gpiod_flags flags;
int len;
if (pdata) {
hub->port_off_mask = pdata->port_off_mask;
- hub->gpio_intn = pdata->gpio_intn;
- hub->gpio_connect = pdata->gpio_connect;
- hub->gpio_reset = pdata->gpio_reset;
hub->mode = pdata->initial_mode;
} else if (np) {
u32 rate = 0;
@@ -230,59 +227,38 @@ static int usb3503_probe(struct usb3503 *hub)
}
}
- hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
- if (hub->gpio_intn == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
- if (hub->gpio_connect == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
- if (hub->gpio_reset == -EPROBE_DEFER)
- return -EPROBE_DEFER;
of_property_read_u32(np, "initial-mode", &mode);
hub->mode = mode;
}
- if (hub->port_off_mask && !hub->regmap)
- dev_err(dev, "Ports disabled with no control interface\n");
-
- if (gpio_is_valid(hub->gpio_intn)) {
- int val = hub->secondary_ref_clk ? GPIOF_OUT_INIT_LOW :
- GPIOF_OUT_INIT_HIGH;
- err = devm_gpio_request_one(dev, hub->gpio_intn, val,
- "usb3503 intn");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as interrupt pin (%d)\n",
- hub->gpio_intn, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_connect)) {
- err = devm_gpio_request_one(dev, hub->gpio_connect,
- GPIOF_OUT_INIT_LOW, "usb3503 connect");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as connect pin (%d)\n",
- hub->gpio_connect, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_reset)) {
- err = devm_gpio_request_one(dev, hub->gpio_reset,
- GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ if (hub->secondary_ref_clk)
+ flags = GPIOD_OUT_LOW;
+ else
+ flags = GPIOD_OUT_HIGH;
+ hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
+ if (IS_ERR(hub->intn))
+ return PTR_ERR(hub->intn);
+ if (hub->intn)
+ gpiod_set_consumer_name(hub->intn, "usb3503 intn");
+
+ hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
+ if (IS_ERR(hub->connect))
+ return PTR_ERR(hub->connect);
+ if (hub->connect)
+ gpiod_set_consumer_name(hub->connect, "usb3503 connect");
+
+ hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hub->reset))
+ return PTR_ERR(hub->reset);
+ if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- return err;
- }
+ gpiod_set_consumer_name(hub->reset, "usb3503 reset");
}
+ if (hub->port_off_mask && !hub->regmap)
+ dev_err(dev, "Ports disabled with no control interface\n");
+
usb3503_switch_mode(hub, hub->mode);
dev_info(dev, "%s: probed in %s mode\n", __func__,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 52f8e2b57ad5..eb2ded1026ee 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -101,7 +101,6 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
- select USB_MUSB_AM335X_CHILD
depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
@@ -111,13 +110,16 @@ config USB_MUSB_UX500
config USB_MUSB_JZ4740
tristate "JZ4740"
- depends on NOP_USB_XCEIV
depends on MIPS || COMPILE_TEST
depends on USB_MUSB_GADGET
depends on USB=n || USB_OTG_BLACKLIST_HUB
-config USB_MUSB_AM335X_CHILD
- tristate
+config USB_MUSB_MEDIATEK
+ tristate "MediaTek platforms"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ depends on GENERIC_PHY
+ select USB_ROLE_SWITCH
comment "MUSB DMA mode"
@@ -142,7 +144,7 @@ config USB_UX500_DMA
config USB_INVENTRA_DMA
bool 'Inventra'
- depends on USB_MUSB_OMAP2PLUS
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a88c79e650c..932247360a9f 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,9 +24,7 @@ obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
-
-
-obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK) += mediatek.o
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index fb6bbd254ab7..704435526394 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
@@ -25,10 +25,6 @@
#include "musb_core.h"
-#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV 160
-#endif
-
#include "davinci.h"
#include "cppi_dma.h"
@@ -40,6 +36,9 @@ struct davinci_glue {
struct device *dev;
struct platform_device *musb;
struct clk *clk;
+ bool vbus_state;
+ struct gpio_desc *vbus;
+ struct work_struct vbus_work;
};
/* REVISIT (PM) we should be able to keep the PHY in low power mode most
@@ -135,43 +134,44 @@ static void davinci_musb_disable(struct musb *musb)
* when J10 is out, and TI documents it as handling OTG.
*/
-#ifdef CONFIG_MACH_DAVINCI_EVM
-
-static int vbus_state = -1;
-
/* I2C operations are always synchronous, and require a task context.
* With unloaded systems, using the shared workqueue seems to suffice
* to satisfy the 100msec A_WAIT_VRISE timeout...
*/
-static void evm_deferred_drvvbus(struct work_struct *ignored)
+static void evm_deferred_drvvbus(struct work_struct *work)
{
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
- vbus_state = !vbus_state;
-}
+ struct davinci_glue *glue = container_of(work, struct davinci_glue,
+ vbus_work);
-#endif /* EVM */
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+ glue->vbus_state = !glue->vbus_state;
+}
-static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on,
+ int immediate)
{
-#ifdef CONFIG_MACH_DAVINCI_EVM
+ struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /* This GPIO handling is entirely optional */
+ if (!glue->vbus)
+ return;
+
if (is_on)
is_on = 1;
- if (vbus_state == is_on)
+ if (glue->vbus_state == is_on)
return;
- vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+ /* 0/1 vs "-1 == unknown/init" */
+ glue->vbus_state = !is_on;
if (machine_is_davinci_evm()) {
- static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
-
if (immediate)
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
else
- schedule_work(&evm_vbus_work);
+ schedule_work(&glue->vbus_work);
}
if (immediate)
- vbus_state = is_on;
-#endif
+ glue->vbus_state = is_on;
}
static void davinci_musb_set_vbus(struct musb *musb, int is_on)
@@ -524,6 +524,15 @@ static int davinci_probe(struct platform_device *pdev)
pdata->platform_ops = &davinci_ops;
+ glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(glue->vbus)) {
+ ret = PTR_ERR(glue->vbus);
+ goto err0;
+ } else {
+ glue->vbus_state = -1;
+ INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus);
+ }
+
usb_phy_generic_register();
platform_set_drvdata(pdev, glue);
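
Dropping davinci's file-scope vbus_state and static DECLARE_WORK in favour of fields in struct davinci_glue relies on container_of() to recover the glue from the work_struct inside the callback — the standard replacement for global state in deferred work. A minimal sketch with illustrative names:

struct my_glue {
	bool vbus_state;
	struct gpio_desc *vbus;
	struct work_struct vbus_work;
};

static void my_vbus_work(struct work_struct *work)
{
	struct my_glue *glue = container_of(work, struct my_glue, vbus_work);

	gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
}

	/* probe: INIT_WORK(&glue->vbus_work, my_vbus_work);
	 * atomic context later: schedule_work(&glue->vbus_work); */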
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index e3b8c84ccdb8..bc0109f4700b 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -17,16 +17,15 @@
#include "musb_core.h"
struct jz4740_glue {
- struct device *dev;
- struct platform_device *musb;
+ struct platform_device *pdev;
struct clk *clk;
};
static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
{
- unsigned long flags;
- irqreturn_t retval = IRQ_NONE;
- struct musb *musb = __hci;
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
@@ -40,7 +39,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
* never see them set
*/
musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
- MUSB_INTR_RESET | MUSB_INTR_SOF;
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
@@ -51,25 +50,20 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
}
static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
};
static const struct musb_hdrc_config jz4740_musb_config = {
/* Silicon does not implement USB OTG. */
- .multipoint = 0,
+ .multipoint = 0,
/* Max EPs scanned, driver will decide which EP can be used. */
- .num_eps = 4,
+ .num_eps = 4,
/* RAMbits needed to configure EPs from table */
- .ram_bits = 9,
- .fifo_cfg = jz4740_musb_fifo_cfg,
- .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
-};
-
-static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
- .mode = MUSB_PERIPHERAL,
- .config = &jz4740_musb_config,
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
};
static int jz4740_musb_init(struct musb *musb)
@@ -88,7 +82,8 @@ static int jz4740_musb_init(struct musb *musb)
return err;
}
- /* Silicon does not implement ConfigData register.
+ /*
+ * Silicon does not implement ConfigData register.
* Set dyn_fifo to avoid reading EP config from hardware.
*/
musb->dyn_fifo = true;
@@ -108,63 +103,67 @@ static const struct musb_platform_ops jz4740_musb_ops = {
.init = jz4740_musb_init,
};
+static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
static int jz4740_probe(struct platform_device *pdev)
{
- struct musb_hdrc_platform_data *pdata = &jz4740_musb_platform_data;
+ struct device *dev = &pdev->dev;
+ const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata;
struct platform_device *musb;
struct jz4740_glue *glue;
- struct clk *clk;
+ struct clk *clk;
int ret;
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
+ dev_err(dev, "failed to allocate musb device");
return -ENOMEM;
}
- clk = devm_clk_get(&pdev->dev, "udc");
+ clk = devm_clk_get(dev, "udc");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
+ dev_err(dev, "failed to get clock");
ret = PTR_ERR(clk);
goto err_platform_device_put;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock\n");
+ dev_err(dev, "failed to enable clock");
goto err_platform_device_put;
}
- musb->dev.parent = &pdev->dev;
+ musb->dev.parent = dev;
- glue->dev = &pdev->dev;
- glue->musb = musb;
+ glue->pdev = musb;
glue->clk = clk;
- pdata->platform_ops = &jz4740_musb_ops;
-
platform_set_drvdata(pdev, glue);
ret = platform_device_add_resources(musb, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
+ dev_err(dev, "failed to add resources");
goto err_clk_disable;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
+ dev_err(dev, "failed to add platform_data");
goto err_clk_disable;
}
ret = platform_device_add(musb);
if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ dev_err(dev, "failed to register musb device");
goto err_clk_disable;
}
@@ -179,9 +178,9 @@ err_platform_device_put:
static int jz4740_remove(struct platform_device *pdev)
{
- struct jz4740_glue *glue = platform_get_drvdata(pdev);
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
- platform_device_unregister(glue->musb);
+ platform_device_unregister(glue->pdev);
clk_disable_unprepare(glue->clk);
return 0;
@@ -190,7 +189,7 @@ static int jz4740_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id jz4740_musb_of_match[] = {
{ .compatible = "ingenic,jz4740-musb" },
- {},
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
#endif
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 000000000000..6b88c2f5d970
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Authors:
+ * Min Guo <min.guo@mediatek.com>
+ * Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS 0x00a0
+#define USB_L1INTM 0x00a4
+#define MTK_MUSB_TXFUNCADDR 0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG 0x80
+#define MUSB_RXTOGEN 0x82
+#define MUSB_TXTOG 0x84
+#define MUSB_TXTOGEN 0x86
+#define MTK_TOGGLE_EN GENMASK(15, 0)
+
+#define TX_INT_STATUS BIT(0)
+#define RX_INT_STATUS BIT(1)
+#define USBCOM_INT_STATUS BIT(2)
+#define DMA_INT_STATUS BIT(3)
+
+#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+
+struct mtk_glue {
+ struct device *dev;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
+ struct platform_device *usb_phy;
+ struct phy *phy;
+ struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
+ struct clk *main;
+ struct clk *mcu;
+ struct clk *univpll;
+ enum usb_role role;
+ struct usb_role_switch *role_sw;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+ struct device *dev = glue->dev;
+
+ glue->main = devm_clk_get(dev, "main");
+ if (IS_ERR(glue->main)) {
+ dev_err(dev, "fail to get main clock\n");
+ return PTR_ERR(glue->main);
+ }
+
+ glue->mcu = devm_clk_get(dev, "mcu");
+ if (IS_ERR(glue->mcu)) {
+ dev_err(dev, "fail to get mcu clock\n");
+ return PTR_ERR(glue->mcu);
+ }
+
+ glue->univpll = devm_clk_get(dev, "univpll");
+ if (IS_ERR(glue->univpll)) {
+ dev_err(dev, "fail to get univpll clock\n");
+ return PTR_ERR(glue->univpll);
+ }
+
+ return 0;
+}
+
+static int mtk_musb_clks_enable(struct mtk_glue *glue)
+{
+ int ret;
+
+ ret = clk_prepare_enable(glue->main);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable main clock\n");
+ goto err_main_clk;
+ }
+
+ ret = clk_prepare_enable(glue->mcu);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable mcu clock\n");
+ goto err_mcu_clk;
+ }
+
+ ret = clk_prepare_enable(glue->univpll);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable univpll clock\n");
+ goto err_univpll_clk;
+ }
+
+ return 0;
+
+err_univpll_clk:
+ clk_disable_unprepare(glue->mcu);
+err_mcu_clk:
+ clk_disable_unprepare(glue->main);
+err_main_clk:
+ return ret;
+}
+
+static void mtk_musb_clks_disable(struct mtk_glue *glue)
+{
+ clk_disable_unprepare(glue->univpll);
+ clk_disable_unprepare(glue->mcu);
+ clk_disable_unprepare(glue->main);
+}
+
+static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue->musb;
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ enum usb_role new_role;
+
+ if (role == glue->role)
+ return 0;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ MUSB_HST_MODE(musb);
+ break;
+ case USB_ROLE_DEVICE:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ MUSB_DEV_MODE(musb);
+ break;
+ case USB_ROLE_NONE:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role != USB_ROLE_NONE)
+ phy_power_off(glue->phy);
+
+ break;
+ default:
+ dev_err(glue->dev, "Invalid State\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+ return 0;
+}
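For context, the setter above is only ever reached through the role-switch framework. A minimal sketch of how an upstream role provider (e.g. a Type-C or extcon driver) would drive it; 'consumer_dev' and the surrounding driver are hypothetical, the calls are the standard role-switch consumer API:

    /* Hedged sketch: 'consumer_dev' is hypothetical. These calls end
     * up invoking musb_usb_role_sx_set() registered further down.
     */
    struct usb_role_switch *sw;

    sw = usb_role_switch_get(consumer_dev);
    if (!IS_ERR_OR_NULL(sw)) {
            usb_role_switch_set_role(sw, USB_ROLE_HOST);
            usb_role_switch_put(sw);
    }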
+
+static enum usb_role musb_usb_role_sx_get(struct device *dev)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+
+ return glue->role;
+}
+
+static int mtk_otg_switch_init(struct mtk_glue *glue)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+
+ role_sx_desc.set = musb_usb_role_sx_set;
+ role_sx_desc.get = musb_usb_role_sx_get;
+ role_sx_desc.fwnode = dev_fwnode(glue->dev);
+ glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
+
+ return PTR_ERR_OR_ZERO(glue->role_sw);
+}
+
+static void mtk_otg_switch_exit(struct mtk_glue *glue)
+{
+ usb_role_switch_unregister(glue->role_sw);
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+ musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+ musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = (struct musb *)dev_id;
+ u32 l1_ints;
+
+ l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+ musb_readl(musb->mregs, USB_L1INTM);
+
+ if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+ retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ if (l1_ints & DMA_INT_STATUS)
+ retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+ return retval;
+}
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
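A quick worked example of the layout this helper assumes: the per-endpoint bus-control blocks sit 8 bytes apart above MTK_MUSB_TXFUNCADDR, and 'offset' selects the register within a block:

    /* Worked example (values from the #defines above):
     *   endpoint 0: 0x0480 + 8 * 0 = 0x0480
     *   endpoint 3: 0x0480 + 8 * 3 = 0x0498
     */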
+
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+ u8 data;
+
+ /* W1C */
+ data = musb_readb(addr, offset);
+ musb_writeb(addr, offset, data);
+ return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+ u16 data;
+
+ /* W1C */
+ data = musb_readw(addr, offset);
+ musb_writew(addr, offset, data);
+ return data;
+}
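The two helpers above encode the controller's write-1-to-clear semantics: the value just read must be written back to acknowledge the bits. For contrast, a sketch of the clear-on-read behaviour the core otherwise assumes; the helper names are illustrative only, not part of the patch:

    /* Illustrative only. On clear-on-read hardware the read alone
     * acknowledges the status, which is why the core can fall back to
     * musb_readb/musb_readw when no clearb/clearw op is provided (see
     * the musb_core.c hunk below).
     */
    static u8 clear_on_read(void __iomem *addr, unsigned int offset)
    {
            return musb_readb(addr, offset);        /* reading clears */
    }

    static u8 write_one_to_clear(void __iomem *addr, unsigned int offset)
    {
            u8 data = musb_readb(addr, offset);

            musb_writeb(addr, offset, data);        /* write back to ack */
            return data;
    }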
+
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ enum phy_mode new_mode;
+ enum usb_role new_role;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(glue->dev, "Invalid mode request\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_OTG) {
+ dev_err(glue->dev, "Does not support changing modes\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ musb_usb_role_sx_set(dev, glue->role);
+ return 0;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ int ret;
+
+ glue->musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+ musb->is_host = false;
+ musb->isr = mtk_musb_interrupt;
+
+ /* Set TX/RX toggle enable */
+ musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
+ musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
+
+ if (musb->port_mode == MUSB_OTG) {
+ ret = mtk_otg_switch_init(glue);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+ DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+ musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+ USBCOM_INT_STATUS | DMA_INT_STATUS);
+ return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+err_phy_init:
+ mtk_otg_switch_exit(glue);
+ return ret;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 toggle;
+
+ toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
+ return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 value, toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out) {
+ value = musb_readw(musb->mregs, MUSB_TXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_TXTOG, value);
+ } else {
+ value = musb_readw(musb->mregs, MUSB_RXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_RXTOG, value);
+ }
+
+ return 0;
+}
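Note that the setter above only ORs the new toggle in and never clears the endpoint's bit when the toggle is 0; whether the hardware clears it by other means is not stated here. Under the assumption that a stale bit could stick, a defensive variant of the is_out branch would be:

    /* Hypothetical defensive variant -- an assumption, not the
     * driver's documented behaviour: clear the endpoint's bit before
     * OR-ing in the new toggle so a stale '1' cannot survive.
     */
    value = musb_readw(musb->mregs, MUSB_TXTOG);
    value &= ~BIT(epnum);
    value |= toggle << epnum;
    musb_writew(musb->mregs, MUSB_TXTOG, value);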
+
+static int mtk_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+ mtk_otg_switch_exit(glue);
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ mtk_musb_clks_disable(glue);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mtk_musb_init,
+ .get_toggle = mtk_musb_get_toggle,
+ .set_toggle = mtk_musb_set_toggle,
+ .exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .clearb = mtk_musb_clearb,
+ .clearw = mtk_musb_clearw,
+ .busctl_offset = mtk_musb_busctl_offset,
+ .set_mode = mtk_musb_set_mode,
+};
+
+#define MTK_MUSB_MAX_EP_NUM 8
+#define MTK_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+ { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+ .fifo_cfg = mtk_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MTK_MUSB_MAX_EP_NUM,
+ .ram_bits = MTK_MUSB_RAM_BITS,
+};
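As a sanity check on ram_bits, assuming the core sizes the dynamic FIFO RAM as 1 << (ram_bits + 2) bytes, the table above fits:

    /* FIFO RAM budget, assuming DYN_FIFO_SIZE = 1 << (ram_bits + 2):
     *   1 << (11 + 2)              = 8192 bytes available
     *   eps 1-5: 10 FIFOs * 512    = 5120
     *   ep 6:     2 FIFOs * 1024   = 2048
     *   ep 7:     512 + 64         =  576
     *   ep 0 (always 64, implicit) =   64
     *   total                      = 7808 <= 8192
     */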
+
+static const struct platform_device_info mtk_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata;
+ struct mtk_glue *glue;
+ struct platform_device_info pinfo;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = dev;
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create child devices at %p\n", np);
+ return ret;
+ }
+
+ ret = mtk_musb_clks_get(glue);
+ if (ret)
+ return ret;
+
+ pdata->config = &mtk_musb_hdrc_config;
+ pdata->platform_ops = &mtk_musb_ops;
+ pdata->mode = usb_get_dr_mode(dev);
+
+ if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
+ pdata->mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
+ pdata->mode = USB_DR_MODE_PERIPHERAL;
+
+ switch (pdata->mode) {
+ case USB_DR_MODE_HOST:
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ glue->role = USB_ROLE_HOST;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ glue->role = USB_ROLE_DEVICE;
+ break;
+ case USB_DR_MODE_OTG:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ glue->role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(&pdev->dev, "Error 'dr_mode' property\n");
+ return -EINVAL;
+ }
+
+ glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+ if (IS_ERR(glue->phy)) {
+ dev_err(dev, "fail to getting phy %ld\n",
+ PTR_ERR(glue->phy));
+ return PTR_ERR(glue->phy);
+ }
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+ dev_err(dev, "fail to registering usb-phy %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ ret = PTR_ERR(glue->xceiv);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = mtk_musb_clks_enable(glue);
+ if (ret)
+ goto err_enable_clk;
+
+ pinfo = mtk_dev_info;
+ pinfo.parent = dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
+ dev_err(dev, "failed to register musb device: %d\n", ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ mtk_musb_clks_disable(glue);
+err_enable_clk:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+ struct mtk_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb_pdev);
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+ { .compatible = "mediatek,mtk-musb", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+ .probe = mtk_musb_probe,
+ .remove = mtk_musb_remove,
+ .driver = {
+ .name = "musb-mtk",
+ .of_match_table = of_match_ptr(mtk_musb_match),
+ },
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
deleted file mode 100644
index 5f04f8e3a640..000000000000
--- a/drivers/usb/musb/musb_am335x.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-static int am335x_child_probe(struct platform_device *pdev)
-{
- int ret;
-
- pm_runtime_enable(&pdev->dev);
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (ret)
- goto err;
-
- return 0;
-err:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static const struct of_device_id am335x_child_of_match[] = {
- { .compatible = "ti,am33xx-usb" },
- { },
-};
-MODULE_DEVICE_TABLE(of, am335x_child_of_match);
-
-static struct platform_driver am335x_child_driver = {
- .probe = am335x_child_probe,
- .driver = {
- .name = "am335x-usb-childs",
- .of_match_table = am335x_child_of_match,
- },
-};
-
-static int __init am335x_child_init(void)
-{
- return platform_driver_register(&am335x_child_driver);
-}
-module_init(am335x_child_init);
-
-MODULE_DESCRIPTION("AM33xx child devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5ebf30bd61bd..f616fb489542 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -73,6 +73,7 @@
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/of.h>
@@ -246,7 +247,7 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
return 0x80 + (0x08 * epnum) + offset;
}
-static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+static u8 musb_default_readb(void __iomem *addr, u32 offset)
{
u8 data = __raw_readb(addr + offset);
@@ -254,13 +255,13 @@ static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
+static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
{
trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
-static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+static u16 musb_default_readw(void __iomem *addr, u32 offset)
{
u16 data = __raw_readw(addr + offset);
@@ -268,12 +269,44 @@ static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
+static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
{
trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+ void __iomem *epio = qh->hw_ep->regs;
+ u16 csr;
+
+ if (is_out)
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+ return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+ struct urb *urb)
+{
+ u16 csr;
+ u16 toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out)
+ csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE)
+ : MUSB_TXCSR_CLRDATATOG;
+ else
+ csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+ return csr;
+}
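These defaults keep the old CSR-based behaviour for glues that do not override the hooks; the host code consumes them through musb->io, as the musb_host.c hunks further below show. The call pattern, in short:

    /* Call pattern used by musb_advance_schedule() below; 'qh', 'urb'
     * and 'is_in' as in that function.
     */
    u16 toggle = musb->io.get_toggle(qh, !is_in);

    usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);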
+
/*
* Load an endpoint's FIFO
*/
@@ -364,19 +397,25 @@ static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/*
* Old style IO functions
*/
-u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+u8 (*musb_readb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readb);
-void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);
-u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readw);
-void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);
-u32 musb_readl(const void __iomem *addr, unsigned offset)
+u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, u32 offset)
{
u32 data = __raw_readl(addr + offset);
@@ -385,7 +424,7 @@ u32 musb_readl(const void __iomem *addr, unsigned offset)
}
EXPORT_SYMBOL_GPL(musb_readl);
-void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+void musb_writel(void __iomem *addr, u32 offset, u32 data)
{
trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
@@ -414,6 +453,108 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}
+static u8 musb_read_devctl(struct musb *musb)
+{
+ return musb_readb(musb->mregs, MUSB_DEVCTL);
+}
+
+/**
+ * musb_set_host - set and initialize host mode
+ * @musb: musb controller driver data
+ *
+ * At least some musb revisions need the devctl session bit enabled
+ * while in peripheral mode in order to switch to host mode. Initializes
+ * things to host mode and sets A_IDLE. SoC glue needs to advance the
+ * state further based on the PHY-provided VBUS state.
+ *
+ * Note that the SoC glue code may need to wait for musb to settle
+ * on enable before calling this to avoid babble.
+ */
+int musb_set_host(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
+ dev_info(musb->controller,
+ "%s: already in host mode: %02x\n",
+ __func__, devctl);
+ goto init_data;
+ }
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set host: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_host);
+
+/**
+ * musb_set_peripheral - set and initialize peripheral mode
+ * @musb: musb controller driver data
+ *
+ * Clears the devctl session bit and initializes things for peripheral
+ * mode and sets B_IDLE. SoC glue needs to advance the state further
+ * based on the PHY-provided VBUS state.
+ */
+int musb_set_peripheral(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ dev_info(musb->controller,
+ "%s: already in peripheral mode: %02x\n",
+ __func__, devctl);
+
+ goto init_data;
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ devctl & MUSB_DEVCTL_BDEVICE, 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_peripheral);
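Both helpers poll DEVCTL every 5 ms for up to one second before giving up. A sketch of the intended caller, SoC glue reacting to an ID-pin event; 'my_glue' and 'id_is_ground' are hypothetical names, not part of the patch:

    /* Hedged sketch of glue-side usage (cf. the omap2430.c hunk below).
     * 'my_glue' and 'id_is_ground' are hypothetical.
     */
    static void my_glue_id_event(struct my_glue *glue, bool id_is_ground)
    {
            struct musb *musb = glue->musb;
            int error;

            if (id_is_ground)
                    error = musb_set_host(musb);       /* session on, A_IDLE */
            else
                    error = musb_set_peripheral(musb); /* session off, B_IDLE */

            if (error)
                    dev_err(musb->controller, "mode switch failed: %d\n",
                            error);
    }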
+
/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
@@ -909,7 +1050,6 @@ static void musb_handle_intr_reset(struct musb *musb)
* @param musb instance pointer
* @param int_usb register contents
* @param devctl
- * @param power
*/
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
@@ -1015,7 +1155,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
static void musb_disable_interrupts(struct musb *musb)
{
void __iomem *mbase = musb->mregs;
- u16 temp;
/* disable interrupts */
musb_writeb(mbase, MUSB_INTRUSBE, 0);
@@ -1025,9 +1164,9 @@ static void musb_disable_interrupts(struct musb *musb)
musb_writew(mbase, MUSB_INTRRXE, 0);
/* flush pending interrupts */
- temp = musb_readb(mbase, MUSB_INTRUSB);
- temp = musb_readw(mbase, MUSB_INTRTX);
- temp = musb_readw(mbase, MUSB_INTRRX);
+ musb_clearb(mbase, MUSB_INTRUSB);
+ musb_clearw(mbase, MUSB_INTRTX);
+ musb_clearw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
@@ -2254,10 +2393,19 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_readb = musb->ops->readb;
if (musb->ops->writeb)
musb_writeb = musb->ops->writeb;
+ if (musb->ops->clearb)
+ musb_clearb = musb->ops->clearb;
+ else
+ musb_clearb = musb_readb;
+
if (musb->ops->readw)
musb_readw = musb->ops->readw;
if (musb->ops->writew)
musb_writew = musb->ops->writew;
+ if (musb->ops->clearw)
+ musb_clearw = musb->ops->clearw;
+ else
+ musb_clearw = musb_readw;
#ifndef CONFIG_MUSB_PIO_ONLY
if (!musb->ops->dma_init || !musb->ops->dma_exit) {
@@ -2279,6 +2427,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
else
musb->io.write_fifo = musb_default_write_fifo;
+ if (musb->ops->get_toggle)
+ musb->io.get_toggle = musb->ops->get_toggle;
+ else
+ musb->io.get_toggle = musb_default_get_toggle;
+
+ if (musb->ops->set_toggle)
+ musb->io.set_toggle = musb->ops->set_toggle;
+ else
+ musb->io.set_toggle = musb_default_set_toggle;
+
if (!musb->xceiv->io_ops) {
musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 04203b7126d5..290a2bc46606 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -27,6 +27,7 @@
struct musb;
struct musb_hw_ep;
struct musb_ep;
+struct musb_qh;
/* Helper defines for struct musb->hwvers */
#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
@@ -119,10 +120,14 @@ struct musb_io;
* @fifo_offset: returns the fifo offset
* @readb: read 8 bits
* @writeb: write 8 bits
+ * @clearb: clear 8 bits, either clear-on-read or W1C
* @readw: read 16 bits
* @writew: write 16 bits
+ * @clearw: clear 16 bits, either clear-on-read or W1C
* @read_fifo: reads the fifo
* @write_fifo: writes to fifo
+ * @get_toggle: platform specific get toggle function
+ * @set_toggle: platform specific set toggle function
* @dma_init: platform specific dma init function
* @dma_exit: platform specific dma exit function
* @init: turns on clocks, sets up platform-specific registers, etc
@@ -161,12 +166,16 @@ struct musb_platform_ops {
u16 fifo_mode;
u32 (*fifo_offset)(u8 epnum);
u32 (*busctl_offset)(u8 epnum, u16 offset);
- u8 (*readb)(const void __iomem *addr, unsigned offset);
- void (*writeb)(void __iomem *addr, unsigned offset, u8 data);
- u16 (*readw)(const void __iomem *addr, unsigned offset);
- void (*writew)(void __iomem *addr, unsigned offset, u16 data);
+ u8 (*readb)(void __iomem *addr, u32 offset);
+ void (*writeb)(void __iomem *addr, u32 offset, u8 data);
+ u8 (*clearb)(void __iomem *addr, u32 offset);
+ u16 (*readw)(void __iomem *addr, u32 offset);
+ void (*writew)(void __iomem *addr, u32 offset, u16 data);
+ u16 (*clearw)(void __iomem *addr, u32 offset);
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
struct dma_controller *
(*dma_init) (struct musb *musb, void __iomem *base);
void (*dma_exit)(struct dma_controller *c);
@@ -487,6 +496,9 @@ extern void musb_start(struct musb *musb);
extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+extern int musb_set_host(struct musb *musb);
+extern int musb_set_peripheral(struct musb *musb);
+
extern void musb_load_testpacket(struct musb *);
extern irqreturn_t musb_interrupt(struct musb *);
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 8f60271c0a9d..4b4d8dc5d3f2 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -35,6 +35,12 @@ struct musb_hw_ep;
* whether shared with the Inventra core or separate.
*/
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#ifdef CONFIG_MUSB_PIO_ONLY
@@ -191,6 +197,9 @@ extern void (*musb_dma_controller_destroy)(struct dma_controller *);
extern struct dma_controller *
musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
extern struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 5a44b70372d9..886c9b602f8c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -286,26 +286,6 @@ __acquires(musb->lock)
spin_lock(&musb->lock);
}
-/* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
- struct urb *urb)
-{
- void __iomem *epio = qh->hw_ep->regs;
- u16 csr;
-
- /*
- * FIXME: the current Mentor DMA code seems to have
- * problems getting toggle correct.
- */
-
- if (is_in)
- csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
-
- usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
-}
-
/*
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
@@ -320,6 +300,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
int status;
+ u16 toggle;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
@@ -327,7 +308,8 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
- musb_save_toggle(qh, is_in, urb);
+ toggle = musb->io.get_toggle(qh, !is_in);
+ usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
@@ -772,13 +754,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (!hw_ep->tx_double_buffered) {
- if (usb_gettoggle(urb->dev, qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
- }
+ if (!hw_ep->tx_double_buffered)
+ csr |= musb->io.set_toggle(qh, is_out, urb);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
@@ -860,17 +837,12 @@ finish:
/* IN/receive */
} else {
- u16 csr;
+ u16 csr = 0;
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, epnum);
+ csr |= musb->io.set_toggle(qh, is_out, urb);
- /* init new state: toggle and NYET, maybe DMA later */
- if (usb_gettoggle(urb->dev, qh->epnum, 0))
- csr = MUSB_RXCSR_H_WR_DATATOGGLE
- | MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = 0;
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
@@ -933,6 +905,7 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr, tx_csr;
+ u16 toggle;
musb_ep_select(mbase, ep->epnum);
if (is_in) {
@@ -970,7 +943,8 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
- musb_save_toggle(cur_qh, is_in, urb);
+ toggle = musb->io.get_toggle(cur_qh, !is_in);
+ usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
if (is_in) {
/* move cur_qh to end of queue */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 8058a58092cf..f17aabd95a50 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -22,6 +22,8 @@
* @read_fifo: platform specific function to read fifo
* @write_fifo: platform specific function to write fifo
* @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
*/
struct musb_io {
u32 (*ep_offset)(u8 epnum, u16 offset);
@@ -30,14 +32,18 @@ struct musb_io {
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
u32 (*busctl_offset)(u8 epnum, u16 offset);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
};
/* Do not add new entries here, add them to struct musb_io instead */
-extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
-extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
-extern u32 musb_readl(const void __iomem *addr, unsigned offset);
-extern void musb_writel(void __iomem *addr, unsigned offset, u32 data);
+extern u8 (*musb_readb)(void __iomem *addr, u32 offset);
+extern void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+extern u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+extern u16 (*musb_readw)(void __iomem *addr, u32 offset);
+extern void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+extern u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+extern u32 musb_readl(void __iomem *addr, u32 offset);
+extern void musb_writel(void __iomem *addr, u32 offset, u32 data);
#endif
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index a97d618fe8ff..b193daf69685 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -38,11 +38,12 @@ TRACE_EVENT(musb_log,
);
DECLARE_EVENT_CLASS(musb_regb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u8, data)
),
@@ -57,21 +58,24 @@ DECLARE_EVENT_CLASS(musb_regb,
);
DEFINE_EVENT(musb_regb, musb_readb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regb, musb_writeb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u16, data)
),
@@ -86,21 +90,24 @@ DECLARE_EVENT_CLASS(musb_regw,
);
DEFINE_EVENT(musb_regw, musb_readw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regw, musb_writew,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u32, data)
),
@@ -115,12 +122,14 @@ DECLARE_EVENT_CLASS(musb_regl,
);
DEFINE_EVENT(musb_regl, musb_readl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regl, musb_writel,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 2d3751d885b4..0aacfc8be5a1 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -10,12 +10,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
-
-#define MUSB_HSDMA_BASE 0x200
-#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL 0x4
-#define MUSB_HSDMA_ADDRESS 0x8
-#define MUSB_HSDMA_COUNT 0xc
+#include "musb_dma.h"
#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
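Each DMA channel owns a 16-byte register window above MUSB_HSDMA_BASE, so the macro resolves to base + (channel * 16) + register. For example:

    /* Worked example: CONTROL register of DMA channel 2.
     *   0x200 + (2 << 4) + 0x4 = 0x224
     */
    u32 off = MUSB_HSDMA_CHANNEL_OFFSET(2, MUSB_HSDMA_CONTROL);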
@@ -268,7 +263,7 @@ static int dma_channel_abort(struct dma_channel *channel)
return 0;
}
-static irqreturn_t dma_controller_irq(int irq, void *private_data)
+irqreturn_t dma_controller_irq(int irq, void *private_data)
{
struct musb_dma_controller *controller = private_data;
struct musb *musb = controller->private_data;
@@ -289,7 +284,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
spin_lock_irqsave(&musb->lock, flags);
- int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
if (!int_hsdma) {
musb_dbg(musb, "spurious DMA irq");
@@ -383,6 +378,7 @@ done:
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(dma_controller_irq);
void musbhs_dma_controller_destroy(struct dma_controller *c)
{
@@ -398,18 +394,10 @@ void musbhs_dma_controller_destroy(struct dma_controller *c)
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
-struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
- void __iomem *base)
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
- struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq_byname(pdev, "dma");
-
- if (irq <= 0) {
- dev_err(dev, "No DMA interrupt line!\n");
- return NULL;
- }
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
@@ -423,6 +411,25 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
+ return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
+
+ if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), controller)) {
@@ -437,3 +444,16 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
+
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
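The noirq variant exists for glues whose DMA interrupt is muxed into the controller's main line; the expected pairing, already visible in mediatek.c above, is roughly as follows ('my_ops' and 'my_glue_isr' are illustrative names):

    /* Illustrative pairing; 'my_ops' and 'my_glue_isr' are not real
     * symbols. The glue selects the noirq constructor and forwards the
     * DMA portion of its single interrupt to dma_controller_irq().
     */
    static const struct musb_platform_ops my_ops = {
            .dma_init = musbhs_dma_controller_create_noirq,
            .dma_exit = musbhs_dma_controller_destroy,
    };

    static irqreturn_t my_glue_isr(int irq, void *data)
    {
            struct musb *musb = data;

            return dma_controller_irq(irq, musb->dma_controller);
    }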
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3d2fef67746..d62c78b97cad 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -38,69 +38,6 @@ struct omap2430_glue {
static struct omap2430_glue *_glue;
-static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
-{
- struct usb_otg *otg = musb->xceiv->otg;
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- /* HDRC controls CPEN, but beware current surges during device
- * connect. They can trigger transient overcurrent conditions
- * that must be ignored.
- */
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- if (is_on) {
- if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
- int loops = 100;
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- /*
- * Wait for the musb to set as A device to enable the
- * VBUS
- */
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
-
- mdelay(5);
- cpu_relax();
-
- if (time_after(jiffies, timeout)
- || loops-- <= 0) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
- }
-
- otg_set_vbus(otg, 1);
- } else {
- musb->is_active = 1;
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- devctl |= MUSB_DEVCTL_SESSION;
- MUSB_HST_MODE(musb);
- }
- } else {
- musb->is_active = 0;
-
- /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
- * jumping right to B_IDLE...
- */
-
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- devctl &= ~MUSB_DEVCTL_SESSION;
-
- MUSB_DEV_MODE(musb);
- }
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- dev_dbg(musb->controller, "VBUS %s, devctl %02x "
- /* otg %3x conf %08x prcm %08x */ "\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- musb_readb(musb->mregs, MUSB_DEVCTL));
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
@@ -140,24 +77,52 @@ static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
return 0;
}
+/*
+ * HDRC controls CPEN, but beware current surges during device connect.
+ * They can trigger transient overcurrent conditions that must be ignored.
+ *
+ * Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
+ * as set by musb_set_peripheral().
+ */
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
- struct musb_hdrc_platform_data *pdata =
- dev_get_platdata(musb->controller);
- struct omap_musb_board_data *data = pdata->board_data;
+ int error;
pm_runtime_get_sync(musb->controller);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
switch (glue->status) {
case MUSB_ID_GROUND:
dev_dbg(musb->controller, "ID GND\n");
-
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- musb->xceiv->last_event = USB_EVENT_ID;
- if (musb->gadget_driver) {
- omap_control_usb_set_mode(glue->control_otghs,
- USB_MODE_HOST);
- omap2430_musb_set_vbus(musb, 1);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_IDLE:
+ error = musb_set_host(musb);
+ if (error)
+ break;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ /* Fall through */
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ /*
+ * On multiple ID ground interrupts just keep enabling
+ * VBUS. At least cpcap VBUS shuts down otherwise.
+ */
+ otg_set_vbus(musb->xceiv->otg, 1);
+ break;
+ default:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->last_event = USB_EVENT_ID;
+ if (musb->gadget_driver) {
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
+ otg_set_vbus(musb->xceiv->otg, 1);
+ }
+ break;
}
break;
@@ -174,12 +139,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(musb->controller, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver)
- omap2430_musb_set_vbus(musb, 0);
-
- if (data->interface_type == MUSB_INTERFACE_UTMI)
- otg_set_vbus(musb->xceiv->otg, 0);
-
+ musb_set_peripheral(musb);
+ otg_set_vbus(musb->xceiv->otg, 0);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
break;
@@ -226,7 +187,6 @@ static int omap2430_musb_init(struct musb *musb)
u32 l;
int status = 0;
struct device *dev = musb->controller;
- struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
@@ -282,50 +242,17 @@ static int omap2430_musb_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- if (glue->status != MUSB_UNKNOWN)
- omap_musb_set_mailbox(glue);
-
return 0;
}
static void omap2430_musb_enable(struct musb *musb)
{
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
- struct omap_musb_board_data *data = pdata->board_data;
-
-
- switch (glue->status) {
-
- case MUSB_ID_GROUND:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
- if (data->interface_type != MUSB_INTERFACE_UTMI)
- break;
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(dev, "configured as A device timeout");
- break;
- }
- }
- break;
- case MUSB_VBUS_VALID:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
- break;
-
- default:
- break;
- }
+ if (glue->status == MUSB_UNKNOWN)
+ glue->status = MUSB_VBUS_OFF;
+ omap_musb_set_mailbox(glue);
}
static void omap2430_musb_disable(struct musb *musb)
@@ -361,8 +288,6 @@ static const struct musb_platform_ops omap2430_ops = {
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_vbus = omap2430_musb_set_vbus,
-
.enable = omap2430_musb_enable,
.disable = omap2430_musb_disable,
@@ -552,6 +477,9 @@ static int omap2430_runtime_resume(struct device *dev)
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
+ /* Wait for musb to get oriented. Otherwise we can get babble */
+ usleep_range(200000, 250000);
+
return 0;
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 832a41f9ee7d..f3f76f2ac63f 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -407,7 +407,7 @@ static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
return SUNXI_MUSB_TXFUNCADDR + offset;
}
-static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
{
struct sunxi_glue *glue;
@@ -520,7 +520,7 @@ static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
(int)(addr - sunxi_musb->mregs));
}
-static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
@@ -781,6 +781,8 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pinfo.name = "musb-hdrc";
pinfo.id = PLATFORM_DEVID_AUTO;
pinfo.parent = &pdev->dev;
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = &pdata;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 39453287b5c3..5d449089e3ad 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -142,7 +142,7 @@ static void tusb_ep_select(void __iomem *mbase, u8 epnum)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
*/
-static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+static u8 tusb_readb(void __iomem *addr, u32 offset)
{
u16 tmp;
u8 val;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d19bb3e89da6..d5cf5e8bb1ca 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -310,9 +310,9 @@ static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
dma_channel->max_len = SZ_16M;
ux500_channel->dma_chan =
- dma_request_slave_channel(dev, chan_names[ch_num]);
+ dma_request_chan(dev, chan_names[ch_num]);
- if (!ux500_channel->dma_chan)
+ if (IS_ERR(ux500_channel->dma_chan))
ux500_channel->dma_chan =
dma_request_channel(mask,
data ?
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f091acb8..ff24fca0a2d9 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -162,7 +162,7 @@ config USB_MXS_PHY
config USB_TEGRA_PHY
tristate "NVIDIA Tegra USB PHY Driver"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
select USB_COMMON
select USB_PHY
select USB_ULPI
@@ -172,7 +172,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 4bb4b1d42f32..20c0f082bf9c 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -108,7 +108,8 @@ enum ab8500_usb_mode {
USB_IDLE = 0,
USB_PERIPHERAL,
USB_HOST,
- USB_DEDICATED_CHG
+ USB_DEDICATED_CHG,
+ USB_UART
};
/* Register USB_LINK_STATUS interrupt */
@@ -393,6 +394,24 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
+ /*
+ * FIXME: For now we rely on the boot firmware to set up the necessary
+ * PHY/pin configuration for UART mode.
+ *
+ * AB8505 does not seem to report any status change for UART cables,
+ * possibly because it cannot detect them autonomously.
+ * We may need to measure the ID resistance manually to reliably
+ * detect UART cables after bootup.
+ */
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505:
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505:
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_UART;
+ ab8500_usb_peri_phy_en(ab);
+ }
+
+ break;
+
default:
break;
}
@@ -566,6 +585,11 @@ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
ab->vbus_draw = 0;
}
+ if (ab->mode == USB_UART) {
+ ab8500_usb_peri_phy_dis(ab);
+ ab->mode = USB_IDLE;
+ }
+
if (is_ab8500_2p0(ab->ab8500)) {
if (ab->mode == USB_DEDICATED_CHG) {
ab8500_usb_wd_linkstatus(ab,
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index f5f0568d8533..8524475d942d 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -57,7 +57,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
- ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index a53b89be5324..661a229c105d 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -21,8 +21,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "phy-generic.h"
@@ -204,8 +203,7 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
return 0;
}
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata)
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err = 0;
@@ -221,28 +219,15 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
needs_vcc = of_property_read_bool(node, "vcc-supply");
needs_clk = of_property_read_bool(node, "clocks");
- nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
- if (!err) {
- nop->gpiod_vbus = devm_gpiod_get_optional(dev,
- "vbus-detect",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
- }
- } else if (pdata) {
- type = pdata->type;
- clk_rate = pdata->clk_rate;
- needs_vcc = pdata->needs_vcc;
- if (gpio_is_valid(pdata->gpio_reset)) {
- err = devm_gpio_request_one(dev, pdata->gpio_reset,
- GPIOF_ACTIVE_LOW,
- dev_name(dev));
- if (!err)
- nop->gpiod_reset =
- gpio_to_desc(pdata->gpio_reset);
- }
- nop->gpiod_vbus = pdata->gpiod_vbus;
+ }
+ nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
+ if (!err) {
+ nop->gpiod_vbus = devm_gpiod_get_optional(dev,
+ "vbus-detect",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
if (err == -EPROBE_DEFER)
@@ -308,7 +293,7 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
if (!nop)
return -ENOMEM;
- err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
+ err = usb_phy_gen_create_phy(dev, nop);
if (err)
return err;
if (nop->gpiod_vbus) {
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 97289627561d..7ee80211a688 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -22,7 +22,6 @@ struct usb_phy_generic {
int usb_gen_phy_init(struct usb_phy *phy);
void usb_gen_phy_shutdown(struct usb_phy *phy);
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata);
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop);
#endif
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 553e2573c74f..f13f5530746c 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -17,7 +17,6 @@
#include <linux/regulator/consumer.h>
#include <linux/usb/gadget.h>
-#include <linux/usb/gpio_vbus.h>
#include <linux/usb/otg.h>
@@ -29,6 +28,8 @@
* Needs to be loaded before the UDC driver that will use it.
*/
struct gpio_vbus_data {
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
struct usb_phy phy;
struct device *dev;
struct regulator *vbus_draw;
@@ -83,38 +84,30 @@ static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
gpio_vbus->mA = mA;
}
-static int is_vbus_powered(struct gpio_vbus_mach_info *pdata)
+static int is_vbus_powered(struct gpio_vbus_data *gpio_vbus)
{
- int vbus;
-
- vbus = gpio_get_value(pdata->gpio_vbus);
- if (pdata->gpio_vbus_inverted)
- vbus = !vbus;
-
- return vbus;
+ return gpiod_get_value(gpio_vbus->vbus_gpiod);
}
static void gpio_vbus_work(struct work_struct *work)
{
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work.work);
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(gpio_vbus->dev);
- int gpio, status, vbus;
+ int status, vbus;
if (!gpio_vbus->phy.otg->gadget)
return;
- vbus = is_vbus_powered(pdata);
+ vbus = is_vbus_powered(gpio_vbus);
if ((vbus ^ gpio_vbus->vbus) == 0)
return;
gpio_vbus->vbus = vbus;
/* Peripheral controllers which manage the pullup themselves won't have
- * gpio_pullup configured here. If it's configured here, we'll do what
- * isp1301_omap::b_peripheral() does and enable the pullup here... although
- * that may complicate usb_gadget_{,dis}connect() support.
+ * a pullup GPIO configured here. If it's configured here, we'll do
+ * what isp1301_omap::b_peripheral() does and enable the pullup here...
+ * although that may complicate usb_gadget_{,dis}connect() support.
*/
- gpio = pdata->gpio_pullup;
if (vbus) {
status = USB_EVENT_VBUS;
@@ -126,16 +119,16 @@ static void gpio_vbus_work(struct work_struct *work)
set_vbus_draw(gpio_vbus, 100);
/* optionally enable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 1);
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
} else {
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -154,12 +147,11 @@ static void gpio_vbus_work(struct work_struct *work)
static irqreturn_t gpio_vbus_irq(int irq, void *data)
{
struct platform_device *pdev = data;
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
struct usb_otg *otg = gpio_vbus->phy.otg;
dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
- is_vbus_powered(pdata) ? "supplied" : "inactive",
+ is_vbus_powered(gpio_vbus) ? "supplied" : "inactive",
otg->gadget ? otg->gadget->name : "none");
if (otg->gadget)
@@ -175,22 +167,18 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct gpio_vbus_data *gpio_vbus;
- struct gpio_vbus_mach_info *pdata;
struct platform_device *pdev;
- int gpio;
gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
- pdata = dev_get_platdata(gpio_vbus->dev);
- gpio = pdata->gpio_pullup;
if (!gadget) {
dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
otg->gadget->name);
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -242,16 +230,12 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
static int gpio_vbus_probe(struct platform_device *pdev)
{
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus;
struct resource *res;
- int err, gpio, irq;
+ struct device *dev = &pdev->dev;
+ int err, irq;
unsigned long irqflags;
- if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
- return -EINVAL;
- gpio = pdata->gpio_vbus;
-
gpio_vbus = devm_kzalloc(&pdev->dev, sizeof(struct gpio_vbus_data),
GFP_KERNEL);
if (!gpio_vbus)
@@ -273,37 +257,43 @@ static int gpio_vbus_probe(struct platform_device *pdev)
gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
- err = devm_gpio_request(&pdev->dev, gpio, "vbus_detect");
- if (err) {
- dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
- gpio, err);
+ /* Look up the VBUS sensing GPIO */
+ gpio_vbus->vbus_gpiod = devm_gpiod_get(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(gpio_vbus->vbus_gpiod)) {
+ err = PTR_ERR(gpio_vbus->vbus_gpiod);
+ dev_err(&pdev->dev, "can't request vbus gpio, err: %d\n", err);
return err;
}
- gpio_direction_input(gpio);
+ gpiod_set_consumer_name(gpio_vbus->vbus_gpiod, "vbus_detect");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res) {
irq = res->start;
irqflags = (res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
} else {
- irq = gpio_to_irq(gpio);
+ irq = gpiod_to_irq(gpio_vbus->vbus_gpiod);
irqflags = VBUS_IRQ_FLAGS;
}
gpio_vbus->irq = irq;
- /* if data line pullup is in use, initialize it to "not pulling up" */
- gpio = pdata->gpio_pullup;
- if (gpio_is_valid(gpio)) {
- err = devm_gpio_request(&pdev->dev, gpio, "udc_pullup");
- if (err) {
- dev_err(&pdev->dev,
- "can't request pullup gpio %d, err: %d\n",
- gpio, err);
- return err;
- }
- gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+ /*
+ * The VBUS sensing GPIO should have a pulldown, which will normally be
+ * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+ * value the GPIO detects as active. Some systems will use comparators.
+ * Get the optional D+ or D- pullup GPIO. If the data line pullup is
+ * in use, initialize it to "not pulling up".
+ */
+ gpio_vbus->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_vbus->pullup_gpiod)) {
+ err = PTR_ERR(gpio_vbus->pullup_gpiod);
+ dev_err(&pdev->dev, "can't request pullup gpio, err: %d\n",
+ err);
+ return err;
}
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_consumer_name(gpio_vbus->pullup_gpiod, "udc_pullup");
err = devm_request_irq(&pdev->dev, irq, gpio_vbus_irq, irqflags,
"vbus_detect", pdev);
@@ -330,7 +320,7 @@ static int gpio_vbus_probe(struct platform_device *pdev)
return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ /* TODO: wakeup could be enabled here with device_init_wakeup(dev, 1) */
return 0;
}
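
[Editor's note: a minimal sketch of the descriptor-based GPIO pattern this conversion adopts; the probe shape and names are illustrative, not taken from the driver. devm_gpiod_get() fetches a required line, devm_gpiod_get_optional() returns NULL rather than an error when the line is absent, and gpiod_get_value() folds the firmware-described polarity in, which is why the old gpio_vbus_inverted flag could be dropped.]

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *vbus, *pullup;

	/* Required input line; polarity comes from the firmware node */
	vbus = devm_gpiod_get(&pdev->dev, "vbus", GPIOD_IN);
	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	/* Optional output line; NULL (not an error) when absent */
	pullup = devm_gpiod_get_optional(&pdev->dev, "pullup", GPIOD_OUT_LOW);
	if (IS_ERR(pullup))
		return PTR_ERR(pullup);

	/* Active-low handling lives in the descriptor, so no manual
	 * inversion is needed, unlike legacy gpio_get_value() */
	dev_info(&pdev->dev, "VBUS %s\n",
		 gpiod_get_value(vbus) ? "present" : "absent");

	return 0;
}
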
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 110e6e9ad621..9c226b57153b 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -76,7 +76,7 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
- ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ea7ef1dc0b42..037e8eee737d 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -9,54 +9,56 @@
* Venu Byravarasu <vbyravarasu@nvidia.com>
*/
-#include <linux/resource.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/iopoll.h>
#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/of.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/regulator/consumer.h>
+
#include <linux/usb/ehci_def.h>
+#include <linux/usb/of.h>
#include <linux/usb/tegra_usb_phy.h>
-#include <linux/regulator/consumer.h>
+#include <linux/usb/ulpi.h>
-#define ULPI_VIEWPORT 0x170
+#define ULPI_VIEWPORT 0x170
/* PORTSC PTS/PHCD bits, Tegra20 only */
-#define TEGRA_USB_PORTSC1 0x184
-#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
-#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+#define TEGRA_USB_PORTSC1 0x184
+#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD BIT(23)
/* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
-#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
-#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
-#define TEGRA_USB_HOSTPC1_DEVLC_PHCD (1 << 22)
+#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
+#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+#define TEGRA_USB_HOSTPC1_DEVLC_PHCD BIT(22)
/* Bits of PORTSC1, which will get cleared by writing 1 into them */
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-#define USB_SUSP_CTRL 0x400
-#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
-#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
-#define USB_SUSP_CLR (1 << 5)
-#define USB_PHY_CLK_VALID (1 << 7)
-#define UTMIP_RESET (1 << 11)
-#define UHSIC_RESET (1 << 11)
-#define UTMIP_PHY_ENABLE (1 << 12)
-#define ULPI_PHY_ENABLE (1 << 13)
-#define USB_SUSP_SET (1 << 14)
-#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
-
-#define USB1_LEGACY_CTRL 0x410
-#define USB1_NO_LEGACY_MODE (1 << 0)
+#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
+#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
+#define USB_SUSP_CLR BIT(5)
+#define USB_PHY_CLK_VALID BIT(7)
+#define UTMIP_RESET BIT(11)
+#define UHSIC_RESET BIT(11)
+#define UTMIP_PHY_ENABLE BIT(12)
+#define ULPI_PHY_ENABLE BIT(13)
+#define USB_SUSP_SET BIT(14)
+#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+
+#define USB1_LEGACY_CTRL 0x410
+#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
@@ -64,94 +66,94 @@
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
-#define ULPI_TIMING_CTRL_0 0x424
-#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
-#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
+#define ULPI_TIMING_CTRL_0 0x424
+#define ULPI_OUTPUT_PINMUX_BYP BIT(10)
+#define ULPI_CLKOUT_PINMUX_BYP BIT(11)
-#define ULPI_TIMING_CTRL_1 0x428
-#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
-#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
-#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
-#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
-#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
-#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
+#define ULPI_TIMING_CTRL_1 0x428
+#define ULPI_DATA_TRIMMER_LOAD BIT(0)
+#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
+#define ULPI_STPDIRNXT_TRIMMER_LOAD BIT(16)
+#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
+#define ULPI_DIR_TRIMMER_LOAD BIT(24)
+#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
-#define UTMIP_PLL_CFG1 0x804
+#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
-#define UTMIP_XCVR_CFG0 0x808
+#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
#define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
-#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
-#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
-#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
-#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_FORCE_PD_POWERDOWN BIT(14)
+#define UTMIP_FORCE_PD2_POWERDOWN BIT(16)
+#define UTMIP_FORCE_PDZI_POWERDOWN BIT(18)
+#define UTMIP_XCVR_LSBIAS_SEL BIT(21)
#define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4)
#define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25)
-#define UTMIP_BIAS_CFG0 0x80c
-#define UTMIP_OTGPD (1 << 11)
-#define UTMIP_BIASPD (1 << 10)
-#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
-#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
-#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
+#define UTMIP_BIAS_CFG0 0x80c
+#define UTMIP_OTGPD BIT(11)
+#define UTMIP_BIASPD BIT(10)
+#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
+#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
+#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
-#define UTMIP_HSRX_CFG0 0x810
-#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
-#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
+#define UTMIP_HSRX_CFG0 0x810
+#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
+#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
-#define UTMIP_HSRX_CFG1 0x814
-#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+#define UTMIP_HSRX_CFG1 0x814
+#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
-#define UTMIP_TX_CFG0 0x820
-#define UTMIP_FS_PREABMLE_J (1 << 19)
-#define UTMIP_HS_DISCON_DISABLE (1 << 8)
+#define UTMIP_TX_CFG0 0x820
+#define UTMIP_FS_PREABMLE_J BIT(19)
+#define UTMIP_HS_DISCON_DISABLE BIT(8)
-#define UTMIP_MISC_CFG0 0x824
-#define UTMIP_DPDM_OBSERVE (1 << 26)
-#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
-#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
+#define UTMIP_MISC_CFG0 0x824
+#define UTMIP_DPDM_OBSERVE BIT(26)
+#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define UTMIP_SUSPEND_EXIT_ON_EDGE BIT(22)
-#define UTMIP_MISC_CFG1 0x828
-#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
-#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+#define UTMIP_MISC_CFG1 0x828
+#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
+#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
-#define UTMIP_DEBOUNCE_CFG0 0x82c
-#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
+#define UTMIP_DEBOUNCE_CFG0 0x82c
+#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
-#define UTMIP_BAT_CHRG_CFG0 0x830
-#define UTMIP_PD_CHRG (1 << 0)
+#define UTMIP_BAT_CHRG_CFG0 0x830
+#define UTMIP_PD_CHRG BIT(0)
-#define UTMIP_SPARE_CFG0 0x834
-#define FUSE_SETUP_SEL (1 << 3)
+#define UTMIP_SPARE_CFG0 0x834
+#define FUSE_SETUP_SEL BIT(3)
-#define UTMIP_XCVR_CFG1 0x838
-#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
-#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
-#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
-#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
+#define UTMIP_XCVR_CFG1 0x838
+#define UTMIP_FORCE_PDDISC_POWERDOWN BIT(0)
+#define UTMIP_FORCE_PDCHRP_POWERDOWN BIT(2)
+#define UTMIP_FORCE_PDDR_POWERDOWN BIT(4)
+#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
-#define UTMIP_BIAS_CFG1 0x83c
-#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+#define UTMIP_BIAS_CFG1 0x83c
+#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
/* For Tegra30 and above only, the address is different in Tegra20 */
-#define USB_USBMODE 0x1f8
-#define USB_USBMODE_MASK (3 << 0)
-#define USB_USBMODE_HOST (3 << 0)
-#define USB_USBMODE_DEVICE (2 << 0)
+#define USB_USBMODE 0x1f8
+#define USB_USBMODE_MASK (3 << 0)
+#define USB_USBMODE_HOST (3 << 0)
+#define USB_USBMODE_DEVICE (2 << 0)
static DEFINE_SPINLOCK(utmip_pad_lock);
-static int utmip_pad_count;
+static unsigned int utmip_pad_count;
struct tegra_xtal_freq {
- int freq;
+ unsigned int freq;
u8 enable_delay;
u8 stable_count;
u8 active_delay;
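
[Editor's note: the register-definition hunks above also replace open-coded shifts with the BIT() macro from <linux/bits.h>. A short illustration of the equivalence, with made-up names:]

#include <linux/bits.h>

/* BIT(n) expands to (1UL << (n)): same value as the open-coded form,
 * but unsigned, so a high bit such as BIT(31) cannot shift into the
 * sign bit of a plain int */
#define EXAMPLE_PHY_ENABLE	BIT(12)		/* was (1 << 12) */
#define EXAMPLE_SUSP_SET	BIT(14)		/* was (1 << 14) */
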
@@ -194,43 +196,49 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
},
};
+static inline struct tegra_usb_phy *to_tegra_usb_phy(struct usb_phy *u_phy)
+{
+ return container_of(u_phy, struct tegra_usb_phy, u_phy);
+}
+
static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1);
+ val &= ~TEGRA_PORTSC1_RWC_BITS;
val &= ~TEGRA_USB_PORTSC1_PTS(~0);
val |= TEGRA_USB_PORTSC1_PTS(pts_val);
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
static void set_phcd(struct tegra_usb_phy *phy, bool enable)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
if (enable)
val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
else
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
if (enable)
val |= TEGRA_USB_PORTSC1_PHCD;
else
val &= ~TEGRA_USB_PORTSC1_PHCD;
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
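
[Editor's note: the new to_tegra_usb_phy() helper introduced above is the standard container_of() upcast. A self-contained sketch of the idiom, with illustrative type names:]

#include <linux/kernel.h>
#include <linux/usb/phy.h>

struct example_phy {
	int soc_id;
	struct usb_phy u_phy;	/* embedded generic PHY */
};

/* container_of() recovers the enclosing structure from a pointer to
 * one of its members by subtracting the member's byte offset */
static inline struct example_phy *to_example_phy(struct usb_phy *u_phy)
{
	return container_of(u_phy, struct example_phy, u_phy);
}
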
@@ -238,23 +246,6 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
{
int ret;
- phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_clk)) {
- ret = PTR_ERR(phy->pad_clk);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMIP pad clock: %d\n", ret);
- return ret;
- }
-
- phy->pad_rst = devm_reset_control_get_optional_shared(
- phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_rst)) {
- ret = PTR_ERR(phy->pad_rst);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMI-pads reset: %d\n", ret);
- return ret;
- }
-
ret = clk_prepare_enable(phy->pad_clk);
if (ret) {
dev_err(phy->u_phy.dev,
@@ -315,18 +306,21 @@ static int utmip_pad_close(struct tegra_usb_phy *phy)
return ret;
}
-static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+static int utmip_pad_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
- void __iomem *base = phy->pad_regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->pad_regs;
+ u32 val;
+ int err;
- clk_prepare_enable(phy->pad_clk);
+ err = clk_prepare_enable(phy->pad_clk);
+ if (err)
+ return err;
- spin_lock_irqsave(&utmip_pad_lock, flags);
+ spin_lock(&utmip_pad_lock);
if (utmip_pad_count++ == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
if (phy->soc_config->requires_extra_tuning_parameters) {
@@ -338,53 +332,59 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
}
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
+
+ return 0;
}
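
[Editor's note: utmip_pad_power_on() above and its _off counterpart below form a refcounted power scheme: only the first caller powers the pads up and only the last powers them down. A compact sketch under the same assumption the rework makes, namely that callers run in sleepable, non-IRQ context, which is why plain spin_lock() replaced spin_lock_irqsave():]

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_pad_lock);
static unsigned int example_pad_count;

static void example_pad_power_on(void __iomem *reg)
{
	spin_lock(&example_pad_lock);
	if (example_pad_count++ == 0)
		writel_relaxed(0x1, reg);	/* first user powers up */
	spin_unlock(&example_pad_lock);
}

static void example_pad_power_off(void __iomem *reg)
{
	spin_lock(&example_pad_lock);
	if (--example_pad_count == 0)
		writel_relaxed(0x0, reg);	/* last user powers down */
	spin_unlock(&example_pad_lock);
}
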
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
void __iomem *base = phy->pad_regs;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(phy->pad_clk);
+ if (ret)
+ return ret;
+
+ spin_lock(&utmip_pad_lock);
if (!utmip_pad_count) {
dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto ulock;
}
- clk_prepare_enable(phy->pad_clk);
-
- spin_lock_irqsave(&utmip_pad_lock, flags);
-
if (--utmip_pad_count == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
-
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ulock:
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
- return 0;
+ return ret;
}
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
u32 tmp;
- return readl_poll_timeout(reg, tmp, (tmp & mask) == result,
- 2000, 6000);
+ return readl_relaxed_poll_timeout(reg, tmp, (tmp & mask) == result,
+ 2000, 6000);
}
static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -395,27 +395,28 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, true);
+ }
- if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on disable\n");
}
static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -427,97 +428,101 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, false);
+ }
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
- USB_PHY_CLK_VALID))
+ USB_PHY_CLK_VALID))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on enable\n");
}
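
[Editor's note: the udelay() to usleep_range() conversions in these paths trade a busy-wait for a sleep, which is only legal because the callers can sleep. A one-function sketch; the values mirror the hunk above:]

#include <linux/delay.h>

/* usleep_range(min, max) may schedule and lets the timer subsystem
 * coalesce wakeups inside the slack; udelay() spins the CPU and is
 * the only option in atomic context */
static void example_settle(void)
{
	usleep_range(10, 100);	/* was udelay(10) */
}
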
static int utmi_phy_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val;
- void __iomem *base = phy->regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
}
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_FS_PREABMLE_J;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG0);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG0);
val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
- writel(val, base + UTMIP_HSRX_CFG0);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG1);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG1);
val &= ~UTMIP_HS_SYNC_START_DLY(~0);
val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
- writel(val, base + UTMIP_HSRX_CFG1);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG1);
- val = readl(base + UTMIP_DEBOUNCE_CFG0);
+ val = readl_relaxed(base + UTMIP_DEBOUNCE_CFG0);
val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
- writel(val, base + UTMIP_DEBOUNCE_CFG0);
+ writel_relaxed(val, base + UTMIP_DEBOUNCE_CFG0);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
- writel(val, base + UTMIP_MISC_CFG0);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
if (!phy->soc_config->utmi_pll_config_in_car_module) {
- val = readl(base + UTMIP_MISC_CFG1);
+ val = readl_relaxed(base + UTMIP_MISC_CFG1);
val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
UTMIP_PLLU_STABLE_COUNT(~0));
val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
- writel(val, base + UTMIP_MISC_CFG1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG1);
- val = readl(base + UTMIP_PLL_CFG1);
+ val = readl_relaxed(base + UTMIP_PLL_CFG1);
val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
- writel(val, base + UTMIP_PLL_CFG1);
+ writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
} else {
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
}
- utmip_pad_power_on(phy);
+ err = utmip_pad_power_on(phy);
+ if (err)
+ return err;
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
@@ -535,57 +540,57 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
}
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
- val = readl(base + UTMIP_BIAS_CFG1);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
- writel(val, base + UTMIP_BIAS_CFG1);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG1);
- val = readl(base + UTMIP_SPARE_CFG0);
+ val = readl_relaxed(base + UTMIP_SPARE_CFG0);
if (config->xcvr_setup_use_fuses)
val |= FUSE_SETUP_SEL;
else
val &= ~FUSE_SETUP_SEL;
- writel(val, base + UTMIP_SPARE_CFG0);
+ writel_relaxed(val, base + UTMIP_SPARE_CFG0);
if (!phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
utmi_phy_clk_enable(phy);
if (phy->soc_config->requires_usbmode_setup) {
- val = readl(base + USB_USBMODE);
+ val = readl_relaxed(base + USB_USBMODE);
val &= ~USB_USBMODE_MASK;
if (phy->mode == USB_DR_MODE_HOST)
val |= USB_USBMODE_HOST;
else
val |= USB_USBMODE_DEVICE;
- writel(val, base + USB_USBMODE);
+ writel_relaxed(val, base + USB_USBMODE);
}
if (!phy->is_legacy_phy)
@@ -596,258 +601,252 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
static int utmi_phy_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
utmi_phy_clk_disable(phy);
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
return utmip_pad_power_off(phy);
}
static void utmi_phy_preresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_postresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val &= ~UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
else
val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(1, 10);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val |= UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
{
- int ret;
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- ret = gpio_direction_output(phy->reset_gpio, 0);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 0: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
- msleep(5);
- ret = gpio_direction_output(phy->reset_gpio, 1);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 1: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+
+ err = clk_prepare_enable(phy->clk);
+ if (err)
+ return err;
+
+ usleep_range(5000, 6000);
+
+ gpiod_set_value_cansleep(phy->reset_gpio, 0);
- clk_prepare_enable(phy->clk);
- msleep(1);
+ usleep_range(1000, 2000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UHSIC_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + ULPI_TIMING_CTRL_0);
+ val = readl_relaxed(base + ULPI_TIMING_CTRL_0);
val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
- writel(val, base + ULPI_TIMING_CTRL_0);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_0);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= ULPI_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
val = 0;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
val |= ULPI_DATA_TRIMMER_SEL(4);
val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
val |= ULPI_DIR_TRIMMER_SEL(4);
- writel(val, base + ULPI_TIMING_CTRL_1);
- udelay(10);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
+ usleep_range(10, 100);
val |= ULPI_DATA_TRIMMER_LOAD;
val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
val |= ULPI_DIR_TRIMMER_LOAD;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
/* Fix VbusInvalid due to floating VBUS */
- ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- udelay(100);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ usleep_range(100, 1000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
return 0;
-}
-static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
-{
- clk_disable(phy->clk);
- return gpio_direction_output(phy->reset_gpio, 0);
+disable_clk:
+ clk_disable_unprepare(phy->clk);
+
+ return err;
}
-static void tegra_usb_phy_close(struct tegra_usb_phy *phy)
+static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
- if (!IS_ERR(phy->vbus))
- regulator_disable(phy->vbus);
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ clk_disable_unprepare(phy->clk);
- if (!phy->is_ulpi_phy)
- utmip_pad_close(phy);
-
- clk_disable_unprepare(phy->pll_u);
+ return 0;
}
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_on(phy);
+ err = ulpi_phy_power_on(phy);
else
- return utmi_phy_power_on(phy);
+ err = utmi_phy_power_on(phy);
+ if (err)
+ return err;
+
+ phy->powered_on = true;
+
+ return 0;
}
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (!phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_off(phy);
+ err = ulpi_phy_power_off(phy);
else
- return utmi_phy_power_off(phy);
-}
+ err = utmi_phy_power_off(phy);
+ if (err)
+ return err;
-static int tegra_usb_phy_suspend(struct usb_phy *x, int suspend)
-{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
- if (suspend)
- return tegra_usb_phy_power_off(phy);
- else
- return tegra_usb_phy_power_on(phy);
+ phy->powered_on = false;
+
+ return 0;
}
-static int ulpi_open(struct tegra_usb_phy *phy)
+static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
{
- int err;
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
- phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link");
- if (IS_ERR(phy->clk)) {
- err = PTR_ERR(phy->clk);
- dev_err(phy->u_phy.dev, "Failed to get ULPI clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(!phy->freq))
+ return;
- err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio,
- "ulpi_phy_reset_b");
- if (err < 0) {
- dev_err(phy->u_phy.dev, "Request failed for GPIO %d: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ tegra_usb_phy_power_off(phy);
- err = gpio_direction_output(phy->reset_gpio, 0);
- if (err < 0) {
- dev_err(phy->u_phy.dev,
- "GPIO %d direction not set to output: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
- phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
- if (!phy->ulpi) {
- dev_err(phy->u_phy.dev, "Failed to create ULPI OTG\n");
- err = -ENOMEM;
- return err;
- }
+ regulator_disable(phy->vbus);
+ clk_disable_unprepare(phy->pll_u);
- phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
- return 0;
+ phy->freq = NULL;
}
-static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
+static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+ if (WARN_ON(!phy->freq))
+ return -EINVAL;
+
+ if (suspend)
+ return tegra_usb_phy_power_off(phy);
+ else
+ return tegra_usb_phy_power_on(phy);
+}
+
+static int tegra_usb_phy_init(struct usb_phy *u_phy)
+{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
unsigned long parent_rate;
- int i;
+ unsigned int i;
int err;
- phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u");
- if (IS_ERR(phy->pll_u)) {
- err = PTR_ERR(phy->pll_u);
- dev_err(phy->u_phy.dev,
- "Failed to get pll_u clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(phy->freq))
+ return 0;
err = clk_prepare_enable(phy->pll_u);
if (err)
@@ -864,64 +863,74 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
dev_err(phy->u_phy.dev, "Invalid pll_u parent rate %ld\n",
parent_rate);
err = -EINVAL;
- goto fail;
+ goto disable_clk;
}
- if (!IS_ERR(phy->vbus)) {
- err = regulator_enable(phy->vbus);
- if (err) {
- dev_err(phy->u_phy.dev,
- "Failed to enable USB VBUS regulator: %d\n",
- err);
- goto fail;
- }
+ err = regulator_enable(phy->vbus);
+ if (err) {
+ dev_err(phy->u_phy.dev,
+ "Failed to enable USB VBUS regulator: %d\n", err);
+ goto disable_clk;
}
- if (phy->is_ulpi_phy)
- err = ulpi_open(phy);
- else
+ if (!phy->is_ulpi_phy) {
err = utmip_pad_open(phy);
- if (err < 0)
- goto fail;
+ if (err)
+ goto disable_vbus;
+ }
+
+ err = tegra_usb_phy_power_on(phy);
+ if (err)
+ goto close_phy;
return 0;
-fail:
+close_phy:
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
+
+disable_vbus:
+ regulator_disable(phy->vbus);
+
+disable_clk:
clk_disable_unprepare(phy->pll_u);
+
+ phy->freq = NULL;
+
return err;
}
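
[Editor's note: the reworked tegra_usb_phy_init() above unwinds partial setup through descending goto labels. A self-contained sketch of the idiom with hypothetical resources; each failure jumps to the label that undoes everything acquired so far, in reverse order:]

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

struct example_dev {
	struct clk *clk;
	struct regulator *vbus;
};

static int example_hw_on(struct example_dev *d) { return 0; }	/* stub */

static int example_init(struct example_dev *d)
{
	int err;

	err = clk_prepare_enable(d->clk);
	if (err)
		return err;

	err = regulator_enable(d->vbus);
	if (err)
		goto disable_clk;

	err = example_hw_on(d);
	if (err)
		goto disable_vbus;

	return 0;

disable_vbus:
	regulator_disable(d->vbus);	/* undo in reverse order */
disable_clk:
	clk_disable_unprepare(d->clk);

	return err;
}
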
-void tegra_usb_phy_preresume(struct usb_phy *x)
+void tegra_usb_phy_preresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
-void tegra_usb_phy_postresume(struct usb_phy *x)
+void tegra_usb_phy_postresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
-void tegra_ehci_phy_restore_start(struct usb_phy *x,
- enum tegra_usb_phy_port_speed port_speed)
+void tegra_ehci_phy_restore_start(struct usb_phy *u_phy,
+ enum tegra_usb_phy_port_speed port_speed)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
-void tegra_ehci_phy_restore_end(struct usb_phy *x)
+void tegra_ehci_phy_restore_end(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
@@ -932,21 +941,25 @@ static int read_utmi_param(struct platform_device *pdev, const char *param,
u8 *dest)
{
u32 value;
- int err = of_property_read_u32(pdev->dev.of_node, param, &value);
- *dest = (u8)value;
- if (err < 0)
+ int err;
+
+ err = of_property_read_u32(pdev->dev.of_node, param, &value);
+ if (err)
dev_err(&pdev->dev,
"Failed to read USB UTMI parameter %s: %d\n",
param, err);
+ else
+ *dest = value;
+
return err;
}
static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
struct platform_device *pdev)
{
+ struct tegra_utmip_config *config;
struct resource *res;
int err;
- struct tegra_utmip_config *config;
tegra_phy->is_ulpi_phy = false;
@@ -957,7 +970,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
}
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI pad regs\n");
return -ENOMEM;
@@ -971,49 +984,49 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
config = tegra_phy->config;
err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
- &config->hssync_start_delay);
- if (err < 0)
+ &config->hssync_start_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,elastic-limit",
- &config->elastic_limit);
- if (err < 0)
+ &config->elastic_limit);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
- &config->idle_wait_delay);
- if (err < 0)
+ &config->idle_wait_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,term-range-adj",
- &config->term_range_adj);
- if (err < 0)
+ &config->term_range_adj);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
- &config->xcvr_lsfslew);
- if (err < 0)
+ &config->xcvr_lsfslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
- &config->xcvr_lsrslew);
- if (err < 0)
+ &config->xcvr_lsrslew);
+ if (err)
return err;
if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
- &config->xcvr_hsslew);
- if (err < 0)
+ &config->xcvr_hsslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hssquelch-level",
- &config->hssquelch_level);
- if (err < 0)
+ &config->hssquelch_level);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
- &config->hsdiscon_level);
- if (err < 0)
+ &config->hsdiscon_level);
+ if (err)
return err;
}
@@ -1022,8 +1035,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
if (!config->xcvr_setup_use_fuses) {
err = read_utmi_param(pdev, "nvidia,xcvr-setup",
- &config->xcvr_setup);
- if (err < 0)
+ &config->xcvr_setup);
+ if (err)
return err;
}
@@ -1053,23 +1066,20 @@ MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
static int tegra_usb_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct resource *res;
- struct tegra_usb_phy *tegra_phy = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct tegra_usb_phy *tegra_phy;
enum usb_phy_interface phy_type;
+ struct reset_control *reset;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ struct usb_phy *phy;
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
if (!tegra_phy)
return -ENOMEM;
- match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- tegra_phy->soc_config = match->data;
+ tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1078,7 +1088,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
}
tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
return -ENOMEM;
@@ -1087,25 +1097,86 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->is_legacy_phy =
of_property_read_bool(np, "nvidia,has-legacy-mode");
+ if (of_find_property(np, "dr_mode", NULL))
+ tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
+ else
+ tegra_phy->mode = USB_DR_MODE_HOST;
+
+ if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev, "dr_mode is invalid\n");
+ return -EINVAL;
+ }
+
+ /* On some boards, the VBUS regulator doesn't need to be controlled */
+ tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(tegra_phy->vbus))
+ return PTR_ERR(tegra_phy->vbus);
+
+ tegra_phy->pll_u = devm_clk_get(&pdev->dev, "pll_u");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pll_u);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get pll_u clock: %d\n", err);
+ return err;
+ }
+
phy_type = of_usb_get_phy_mode(np);
switch (phy_type) {
case USBPHY_INTERFACE_MODE_UTMI:
err = utmi_phy_probe(tegra_phy, pdev);
- if (err < 0)
+ if (err)
return err;
+
+ tegra_phy->pad_clk = devm_clk_get(&pdev->dev, "utmi-pads");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pad_clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMIP pad clock: %d\n", err);
+ return err;
+ }
+
+ reset = devm_reset_control_get_optional_shared(&pdev->dev,
+ "utmi-pads");
+ err = PTR_ERR_OR_ZERO(reset);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMI-pads reset: %d\n", err);
+ return err;
+ }
+ tegra_phy->pad_rst = reset;
break;
case USBPHY_INTERFACE_MODE_ULPI:
tegra_phy->is_ulpi_phy = true;
- tegra_phy->reset_gpio =
- of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0);
- if (!gpio_is_valid(tegra_phy->reset_gpio)) {
+ tegra_phy->clk = devm_clk_get(&pdev->dev, "ulpi-link");
+ err = PTR_ERR_OR_ZERO(tegra_phy->clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get ULPI clock: %d\n", err);
+ return err;
+ }
+
+ gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
+ "nvidia,phy-reset-gpio",
+ 0, GPIOD_OUT_HIGH,
+ "ulpi_phy_reset_b");
+ err = PTR_ERR_OR_ZERO(gpiod);
+ if (err) {
dev_err(&pdev->dev,
- "Invalid GPIO: %d\n", tegra_phy->reset_gpio);
- return tegra_phy->reset_gpio;
+ "Request failed for reset GPIO: %d\n", err);
+ return err;
+ }
+ tegra_phy->reset_gpio = gpiod;
+
+ phy = devm_otg_ulpi_create(&pdev->dev,
+ &ulpi_viewport_access_ops, 0);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to create ULPI OTG\n");
+ return -ENOMEM;
}
- tegra_phy->config = NULL;
+
+ tegra_phy->ulpi = phy;
+ tegra_phy->ulpi->io_priv = tegra_phy->regs + ULPI_VIEWPORT;
break;
default:
@@ -1114,40 +1185,16 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (of_find_property(np, "dr_mode", NULL))
- tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
- else
- tegra_phy->mode = USB_DR_MODE_HOST;
-
- if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
- dev_err(&pdev->dev, "dr_mode is invalid\n");
- return -EINVAL;
- }
-
- /* On some boards, the VBUS regulator doesn't need to be controlled */
- if (of_find_property(np, "vbus-supply", NULL)) {
- tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (IS_ERR(tegra_phy->vbus))
- return PTR_ERR(tegra_phy->vbus);
- } else {
- dev_notice(&pdev->dev, "no vbus regulator");
- tegra_phy->vbus = ERR_PTR(-ENODEV);
- }
-
tegra_phy->u_phy.dev = &pdev->dev;
- err = tegra_usb_phy_init(tegra_phy);
- if (err < 0)
- return err;
-
- tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+ tegra_phy->u_phy.init = tegra_usb_phy_init;
+ tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+ tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
err = usb_add_phy_dev(&tegra_phy->u_phy);
- if (err < 0) {
- tegra_usb_phy_close(tegra_phy);
+ if (err)
return err;
- }
return 0;
}
@@ -1157,7 +1204,6 @@ static int tegra_usb_phy_remove(struct platform_device *pdev)
struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
usb_remove_phy(&tegra_phy->u_phy);
- tegra_usb_phy_close(tegra_phy);
return 0;
}
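
[Editor's note: the probe hunk above swaps the of_match_device() dance for of_device_get_match_data(). A sketch with an illustrative config type:]

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_soc_config { int has_hostpc; };	/* illustrative */

static int example_probe(struct platform_device *pdev)
{
	const struct example_soc_config *cfg;

	/* Folds the match lookup and ->data dereference into one call;
	 * returns NULL when there is no match */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	return 0;
}
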
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c49369a60..e683a37e3a7a 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -240,6 +240,21 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ phy->label = "ULPI";
+ phy->flags = flags;
+ phy->io_ops = ops;
+ phy->otg = otg;
+ phy->init = ulpi_init;
+
+ otg->usb_phy = phy;
+ otg->set_host = ulpi_set_host;
+ otg->set_vbus = ulpi_set_vbus;
+}
+
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -257,17 +272,32 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
return NULL;
}
- phy->label = "ULPI";
- phy->flags = flags;
- phy->io_ops = ops;
- phy->otg = otg;
- phy->init = ulpi_init;
-
- otg->usb_phy = phy;
- otg->set_host = ulpi_set_host;
- otg->set_vbus = ulpi_set_vbus;
+ otg_ulpi_init(phy, otg, ops, flags);
return phy;
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
+struct usb_phy *
+devm_otg_ulpi_create(struct device *dev,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
+ if (!otg) {
+ devm_kfree(dev, phy);
+ return NULL;
+ }
+
+ otg_ulpi_init(phy, otg, ops, flags);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
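
[Editor's note: devm_otg_ulpi_create() above ties both allocations to the consuming device so no explicit kfree() is needed on unbind. A minimal sketch of the devm ownership model it relies on, with an illustrative payload type:]

#include <linux/device.h>
#include <linux/slab.h>

struct example_ctx { int state; };	/* illustrative payload */

static int example_bind(struct device *dev)
{
	struct example_ctx *ctx;

	/* Freed automatically when 'dev' unbinds, so later error paths
	 * and the remove callback need no matching kfree() */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	dev_set_drvdata(dev, ctx);

	return 0;
}
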
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 0277f62739a2..ad2554630889 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,6 +34,14 @@ struct phy_devm {
struct notifier_block *nb;
};
+static const char *const usb_chger_type[] = {
+ [UNKNOWN_TYPE] = "USB_CHARGER_UNKNOWN_TYPE",
+ [SDP_TYPE] = "USB_CHARGER_SDP_TYPE",
+ [CDP_TYPE] = "USB_CHARGER_CDP_TYPE",
+ [DCP_TYPE] = "USB_CHARGER_DCP_TYPE",
+ [ACA_TYPE] = "USB_CHARGER_ACA_TYPE",
+};
+
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
@@ -98,7 +106,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
{
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
char uchger_state[50] = { 0 };
- char *envp[] = { uchger_state, NULL };
+ char uchger_type[50] = { 0 };
+ char *envp[] = { uchger_state, uchger_type, NULL };
unsigned int min, max;
switch (usb_phy->chg_state) {
@@ -122,6 +131,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
return;
}
+ snprintf(uchger_type, ARRAY_SIZE(uchger_type),
+ "USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
}
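
[Editor's note: the hunk above adds a USB_CHARGER_TYPE= variable next to the existing state variable in the charger uevent. A sketch of the kobject_uevent_env() contract it uses; the value strings here are placeholders:]

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kobject.h>

static void example_notify_charger(struct device *dev, const char *type)
{
	char state[50];
	char chger[50];
	char *envp[] = { state, chger, NULL };	/* must be NULL-terminated */

	snprintf(state, sizeof(state), "USB_CHARGER_STATE=%s", "PRESENT");
	snprintf(chger, sizeof(chger), "USB_CHARGER_TYPE=%s", type);

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
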
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d438b7871446..3af91b2b8f76 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -8,11 +8,10 @@
*/
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -592,7 +591,8 @@ static int usbhs_probe(struct platform_device *pdev)
struct usbhs_priv *priv;
struct resource *irq_res;
struct device *dev = &pdev->dev;
- int ret, gpio;
+ struct gpio_desc *gpiod;
+ int ret;
u32 tmp;
/* check device node */
@@ -657,10 +657,9 @@ static int usbhs_probe(struct platform_device *pdev)
priv->dparam.pio_dma_border = 64; /* 64byte */
if (!of_property_read_u32(dev_of_node(dev), "renesas,buswait", &tmp))
priv->dparam.buswait_bwait = tmp;
- gpio = of_get_named_gpio_flags(dev_of_node(dev), "renesas,enable-gpio",
- 0, NULL);
- if (gpio > 0)
- priv->dparam.enable_gpio = gpio;
+ gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
/* FIXME */
/* runtime power control ? */
@@ -708,13 +707,10 @@ static int usbhs_probe(struct platform_device *pdev)
usbhs_sys_clock_ctrl(priv, 0);
/* check GPIO determining if USB function should be enabled */
- if (priv->dparam.enable_gpio) {
- gpio_request_one(priv->dparam.enable_gpio, GPIOF_IN, NULL);
- ret = !gpio_get_value(priv->dparam.enable_gpio);
- gpio_free(priv->dparam.enable_gpio);
+ if (gpiod) {
+ ret = !gpiod_get_value(gpiod);
if (ret) {
- dev_warn(dev, "USB function not selected (GPIO %d)\n",
- priv->dparam.enable_gpio);
+ dev_warn(dev, "USB function not selected (GPIO)\n");
ret = -ENOTSUPP;
goto probe_end_mod_exit;
}
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 440d213e1749..52756fc2ac9c 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -6,8 +6,6 @@
* Copyright (C) 2019 Renesas Electronics Corporation
*/
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include "common.h"
#include "rcar2.h"
@@ -34,7 +32,7 @@ static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
if (priv->phy) {
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c
index 021749594389..3eed3334a17f 100644
--- a/drivers/usb/renesas_usbhs/rza2.c
+++ b/drivers/usb/renesas_usbhs/rza2.c
@@ -29,7 +29,7 @@ static int usbhs_rza2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
return 0;
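
[Editor's note: both Renesas hunks track a generic-PHY API change: phy_put() now takes the consuming struct device so the framework can drop the right reference. A hedged consumer-side sketch:]

#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_use_phy(struct platform_device *pdev)
{
	struct phy *p;

	p = phy_get(&pdev->dev, "usb");
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* ... power on and use the PHY ... */

	phy_put(&pdev->dev, p);	/* device argument is now required */

	return 0;
}
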
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 409851306e99..80d6559bbcb2 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
- data->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ data->base = devm_ioremap(dev, res->start, resource_size(res));
if (!data->base)
return -ENOMEM;
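
[Editor's note: the devm_ioremap_nocache() call above could be replaced mechanically because ioremap() is uncached on every architecture, which made the _nocache variant an alias before its removal. A one-line sketch:]

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map(struct device *dev, struct resource *res)
{
	/* Uncached by default; no _nocache spelling needed */
	return devm_ioremap(dev, res->start, resource_size(res));
}
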
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ed4a18b435a0..25d7e0c36d38 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -129,9 +129,6 @@ config USB_SERIAL_DIGI_ACCELEPORT
parallel port on the USB 2 appears as a third serial port on Linux.
The Digi Acceleport USB 8 is not yet supported by this driver.
- This driver works under SMP with the usb-uhci driver. It does not
- work under SMP with the uhci driver.
-
To compile this driver as a module, choose M here: the
module will be called digi_acceleport.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index df582fe855f0..d3f420f3a083 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
- struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct ch341_private *priv;
int ret;
+ priv = usb_get_serial_port_data(port);
+ if (!priv)
+ return 0;
+
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index ebd76ab07b72..821970609695 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -276,7 +276,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
old_rdtodo = priv->rdtodo;
if (old_rdtodo > SHRT_MAX - size) {
- dev_dbg(dev, "To many bulk_in urbs to do.\n");
+ dev_dbg(dev, "Too many bulk_in urbs to do.\n");
spin_unlock_irqrestore(&priv->lock, flags);
goto resubmit;
}
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 633550ec3025..ffd984142171 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -104,7 +104,7 @@ struct garmin_packet {
int seq;
/* the real size of the data array, always > 0 */
int size;
- __u8 data[1];
+ __u8 data[];
};
/* structure used to keep the current state of the driver */
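
[Editor's note: the garmin_gps hunk converts a trailing data[1] buffer to a C99 flexible array member. A sketch of the idiomatic allocation that pairs with data[], using struct_size() for overflow-checked sizing; names are illustrative:]

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_packet {
	int seq;
	int size;	/* number of bytes in data[] */
	u8 data[];	/* flexible array member, replaces data[1] */
};

static struct example_packet *example_alloc(int seq, size_t len)
{
	struct example_packet *p;

	/* struct_size() adds the header and len bytes of payload with
	 * overflow checking */
	p = kmalloc(struct_size(p, data, len), GFP_KERNEL);
	if (p) {
		p->seq = seq;
		p->size = len;
	}

	return p;
}
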
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 9690a5f4b9d6..5737add6a2a4 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb)
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
spin_lock_irqsave(&edge_port->ep_lock,
flags);
edge_port->txCredits += txCredits;
@@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
- struct device *dev = &edge_serial->serial->dev->dev;
+ struct usb_serial *serial = edge_serial->serial;
+ struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
@@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
/* spit this data back into the tty driver if this
port is open */
- if (rxLen) {
- port = edge_serial->serial->port[
- edge_serial->rxPort];
+ if (rxLen && edge_serial->rxPort < serial->num_ports) {
+ port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
@@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
rxLen);
edge_port->port->icount.rx += rxLen;
}
- buffer += rxLen;
}
+ buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
@@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
+ if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+ return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
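
[Editor's note: the io_edgeport hunks above guard every use of the device-reported rxPort index. A small hypothetical helper capturing the rule being applied: indices parsed from bus traffic are range-checked before indexing, and the per-port data may still be NULL:]

#include <linux/usb/serial.h>

static struct usb_serial_port *
example_get_port(struct usb_serial *serial, unsigned int idx)
{
	/* A misbehaving device can report any port number */
	if (idx >= serial->num_ports)
		return NULL;

	return serial->port[idx];
}
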
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 302eb9530859..79d0586e2b33 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -45,9 +45,10 @@ static int buffer_size;
static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+static int ir_write_room(struct tty_struct *tty);
+static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -75,10 +76,13 @@ static struct usb_serial_driver ir_device = {
.description = "IR Dongle",
.id_table = ir_id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.set_termios = ir_set_termios,
.attach = ir_startup,
- .open = ir_open,
- .prepare_write_buffer = ir_prepare_write_buffer,
+ .write = ir_write,
+ .write_room = ir_write_room,
+ .write_bulk_callback = ir_write_bulk_callback,
.process_read_urb = ir_process_read_urb,
};
@@ -251,35 +255,102 @@ static int ir_startup(struct usb_serial *serial)
return 0;
}
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
{
- int i;
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
- port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
+ if (port->bulk_out_size == 0)
+ return -EINVAL;
- /* Start reading from the device */
- return usb_serial_generic_open(tty, port);
-}
+ if (count == 0)
+ return 0;
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size)
-{
- unsigned char *buf = dest;
- int count;
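+ /* Reserve one byte of the urb buffer for the IrDA header. */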
+ count = min(count, port->bulk_out_size - 1);
+
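+ /* Claim the single preallocated write urb and account the bytes. */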
+ spin_lock_irqsave(&port->lock, flags);
+ if (__test_and_clear_bit(0, &port->write_urbs_free)) {
+ urb = port->write_urbs[0];
+ port->tx_bytes += count;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (!urb)
+ return 0;
/*
* The first byte of the packet we send to the device contains an
- * inbound header which indicates an additional number of BOFs and
+ * outbound header which indicates an additional number of BOFs and
* a baud rate change.
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *buf = ir_xbof | ir_baud;
+ *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
+
+ memcpy(urb->transfer_buffer + 1, buf, count);
+
+ urb->transfer_buffer_length = count + 1;
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+ }
+
+ return count;
+}
+
+static void ir_write_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
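+ /* The extra byte was the IrDA header, not payload. */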
+ port->tx_bytes -= urb->transfer_buffer_length - 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ default:
+ dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
+ break;
+ }
+
+ usb_serial_port_softint(port);
+}
- count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
- &port->lock);
- return count + 1;
+static int ir_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ int count = 0;
+
+ if (port->bulk_out_size == 0)
+ return 0;
+
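+ /* One byte of each urb is reserved for the IrDA header. */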
+ if (test_bit(0, &port->write_urbs_free))
+ count = port->bulk_out_size - 1;
+
+ return count;
}
static void ir_process_read_urb(struct urb *urb)
@@ -304,23 +375,15 @@ static void ir_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static void ir_set_termios_callback(struct urb *urb)
-{
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- dev_dbg(&urb->dev->dev, "%s - non-zero urb status: %d\n",
- __func__, urb->status);
-}
-
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
- struct urb *urb;
+ struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
- int result;
+ int actual_length;
speed_t baud;
int ir_baud;
+ int ret;
baud = tty_get_baud_rate(tty);
@@ -332,34 +395,34 @@ static void ir_set_termios(struct tty_struct *tty,
switch (baud) {
case 2400:
- ir_baud = USB_IRDA_BR_2400;
+ ir_baud = USB_IRDA_LS_2400;
break;
case 9600:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
break;
case 19200:
- ir_baud = USB_IRDA_BR_19200;
+ ir_baud = USB_IRDA_LS_19200;
break;
case 38400:
- ir_baud = USB_IRDA_BR_38400;
+ ir_baud = USB_IRDA_LS_38400;
break;
case 57600:
- ir_baud = USB_IRDA_BR_57600;
+ ir_baud = USB_IRDA_LS_57600;
break;
case 115200:
- ir_baud = USB_IRDA_BR_115200;
+ ir_baud = USB_IRDA_LS_115200;
break;
case 576000:
- ir_baud = USB_IRDA_BR_576000;
+ ir_baud = USB_IRDA_LS_576000;
break;
case 1152000:
- ir_baud = USB_IRDA_BR_1152000;
+ ir_baud = USB_IRDA_LS_1152000;
break;
case 4000000:
- ir_baud = USB_IRDA_BR_4000000;
+ ir_baud = USB_IRDA_LS_4000000;
break;
default:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
baud = 9600;
}
@@ -375,42 +438,22 @@ static void ir_set_termios(struct tty_struct *tty,
/*
* send the baud change out on an "empty" data packet
*/
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return;
-
transfer_buffer = kmalloc(1, GFP_KERNEL);
if (!transfer_buffer)
- goto err_buf;
+ return;
*transfer_buffer = ir_xbof | ir_baud;
- usb_fill_bulk_urb(
- urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- transfer_buffer,
- 1,
- ir_set_termios_callback,
- port);
-
- urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb(urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "%s - failed to submit urb: %d\n",
- __func__, result);
- goto err_subm;
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
+ transfer_buffer, 1, &actual_length, 5000);
+ if (ret || actual_length != 1) {
+ if (actual_length != 1)
+ ret = -EIO;
+ dev_err(&port->dev, "failed to change line speed: %d\n", ret);
}
- usb_free_urb(urb);
-
- return;
-err_subm:
kfree(transfer_buffer);
-err_buf:
- usb_free_urb(urb);
}
static int __init ir_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e66a59ef43a1..aa3dbce22cfb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index cb7aac9cd9e7..0af76800bd78 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -41,6 +41,9 @@ struct opticon_private {
bool rts;
bool cts;
int outstanding_urbs;
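+ /* Bytes queued for write, reported by chars_in_buffer(). */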
+ int outstanding_bytes;
+
+ struct usb_anchor anchor;
};
@@ -113,7 +116,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, 0);
+ 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
@@ -149,6 +152,15 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
return res;
}
+static void opticon_close(struct usb_serial_port *port)
+{
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+
+ usb_kill_anchored_urbs(&priv->anchor);
+
+ usb_serial_generic_close(port);
+}
+
static void opticon_write_control_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
@@ -169,6 +181,7 @@ static void opticon_write_control_callback(struct urb *urb)
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= urb->transfer_buffer_length;
spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
@@ -182,8 +195,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
struct urb *urb;
unsigned char *buffer;
unsigned long flags;
- int status;
struct usb_ctrlrequest *dr;
+ int ret = -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
@@ -192,19 +205,16 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
priv->outstanding_urbs++;
+ priv->outstanding_bytes += count;
spin_unlock_irqrestore(&priv->lock, flags);
buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- count = -ENOMEM;
+ if (!buffer)
goto error_no_buffer;
- }
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- count = -ENOMEM;
+ if (!urb)
goto error_no_urb;
- }
memcpy(buffer, buf, count);
@@ -213,10 +223,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
/* The connected devices do not have a bulk write endpoint,
* to transmit data to the barcode device the control endpoint is used */
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!dr) {
- count = -ENOMEM;
+ if (!dr)
goto error_no_dr;
- }
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
dr->bRequest = 0x01;
@@ -229,13 +237,13 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
(unsigned char *)dr, buffer, count,
opticon_write_control_callback, port);
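+ /* Anchor the urb so that opticon_close() can cancel it. */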
+ usb_anchor_urb(urb, &priv->anchor);
+
/* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - usb_submit_urb(write endpoint) failed status = %d\n",
- __func__, status);
- count = status;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+ usb_unanchor_urb(urb);
goto error;
}
@@ -253,8 +261,10 @@ error_no_urb:
error_no_buffer:
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= count;
spin_unlock_irqrestore(&priv->lock, flags);
- return count;
+
+ return ret;
}
static int opticon_write_room(struct tty_struct *tty)
@@ -279,6 +289,20 @@ static int opticon_write_room(struct tty_struct *tty)
return 2048;
}
+static int opticon_chars_in_buffer(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ count = priv->outstanding_bytes;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return count;
+}
+
static int opticon_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -354,6 +378,7 @@ static int opticon_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ init_usb_anchor(&priv->anchor);
usb_set_serial_port_data(port, priv);
@@ -381,8 +406,10 @@ static struct usb_serial_driver opticon_device = {
.port_probe = opticon_port_probe,
.port_remove = opticon_port_remove,
.open = opticon_open,
+ .close = opticon_close,
.write = opticon_write,
.write_room = opticon_write_room,
+ .chars_in_buffer = opticon_chars_in_buffer,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.get_serial = get_serial_info,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2d919d0e6e45..084cc2fff3ae 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
+#define QUECTEL_PRODUCT_RM500Q 0x0800
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1104,6 +1105,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a62981ca7a73..f93b81a297d6 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
u8 newMSR = (u8) *ch;
unsigned long flags;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
@@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
unsigned long flags;
u8 newLSR = (u8) *ch;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index edbbb13d6de6..bd23a7cb1be2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+ { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8f066bb55d7d..dc7a65b9ec98 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
return -EINVAL;
}
+ /* Prevent individual ports from being unbound. */
+ driver->driver.suppress_bind_attrs = true;
+
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4092248a5936..0edfb89e04a8 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -188,7 +188,7 @@ static void dp_altmode_work(struct work_struct *work)
switch (dp->state) {
case DP_STATE_ENTER:
- ret = typec_altmode_enter(dp->alt);
+ ret = typec_altmode_enter(dp->alt, NULL);
if (ret)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
@@ -306,7 +306,8 @@ err_unlock:
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt) : typec_altmode_exit(alt);
+ return activate ? typec_altmode_enter(alt, NULL) :
+ typec_altmode_exit(alt);
}
static const struct typec_altmode_ops dp_altmode_ops = {
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 74cb3c2ecb34..2e45eb479386 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Bus for USB Type-C Alternate Modes
*
* Copyright (C) 2018 Intel Corporation
@@ -10,12 +10,23 @@
#include "bus.h"
-static inline int typec_altmode_set_mux(struct altmode *alt, u8 state)
+static inline int
+typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
{
- return alt->mux ? alt->mux->set(alt->mux, state) : 0;
+ struct typec_mux_state state;
+
+ if (!alt->mux)
+ return 0;
+
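+ /* Wrap the configuration and altmode-specific data in a mux state. */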
+ state.alt = &alt->adev;
+ state.mode = conf;
+ state.data = data;
+
+ return alt->mux->set(alt->mux, &state);
}
-static int typec_altmode_set_state(struct typec_altmode *adev, int state)
+static int typec_altmode_set_state(struct typec_altmode *adev,
+ unsigned long conf, void *data)
{
bool is_port = is_typec_port(adev->dev.parent);
struct altmode *port_altmode;
@@ -23,11 +34,11 @@ static int typec_altmode_set_state(struct typec_altmode *adev, int state)
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- ret = typec_altmode_set_mux(port_altmode, state);
+ ret = typec_altmode_set_mux(port_altmode, conf, data);
if (ret)
return ret;
- blocking_notifier_call_chain(&port_altmode->nh, state, NULL);
+ blocking_notifier_call_chain(&port_altmode->nh, conf, NULL);
return 0;
}
@@ -67,7 +78,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
is_port = is_typec_port(adev->dev.parent);
partner = altmode->partner;
- ret = typec_altmode_set_mux(is_port ? altmode : partner, (u8)conf);
+ ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
if (ret)
return ret;
@@ -84,12 +95,14 @@ EXPORT_SYMBOL_GPL(typec_altmode_notify);
/**
* typec_altmode_enter - Enter Mode
* @adev: The alternate mode
+ * @vdo: VDO for the Enter Mode command
*
* The alternate mode drivers use this function to enter mode. The port drivers
* use this to inform the alternate mode drivers that the partner has initiated
- * Enter Mode command.
+ * Enter Mode command. If the alternate mode does not require a VDO, @vdo must be
+ * NULL.
*/
-int typec_altmode_enter(struct typec_altmode *adev)
+int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
{
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev = &partner->adev;
@@ -101,13 +114,16 @@ int typec_altmode_enter(struct typec_altmode *adev)
if (!pdev->ops || !pdev->ops->enter)
return -EOPNOTSUPP;
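+ /* Do not enter a mode that the port has not activated. */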
+ if (is_typec_port(pdev->dev.parent) && !pdev->active)
+ return -EPERM;
+
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
/* Enter Mode */
- return pdev->ops->enter(pdev);
+ return pdev->ops->enter(pdev, vdo);
}
EXPORT_SYMBOL_GPL(typec_altmode_enter);
@@ -130,7 +146,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
return -EOPNOTSUPP;
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
@@ -383,7 +399,7 @@ static int typec_remove(struct device *dev)
drv->remove(to_typec_altmode(dev));
if (adev->active) {
- WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE));
+ WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL));
typec_altmode_update_active(adev, false);
}
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 91d62276b56f..7c44e930602f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -834,6 +834,52 @@ static const struct device_type typec_cable_dev_type = {
.release = typec_cable_release,
};
+static int cable_match(struct device *dev, void *data)
+{
+ return is_typec_cable(dev);
+}
+
+/**
+ * typec_cable_get - Get a reference to the USB Type-C cable
+ * @port: The USB Type-C Port the cable is connected to
+ *
+ * The caller must decrement the reference count with typec_cable_put() after
+ * use.
+ */
+struct typec_cable *typec_cable_get(struct typec_port *port)
+{
+ struct device *dev;
+
+ dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!dev)
+ return NULL;
+
+ return to_typec_cable(dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_get);
+
+/**
+ * typec_cable_put - Decrement the reference count on USB Type-C cable
+ * @cable: The USB Type-C cable
+ */
+void typec_cable_put(struct typec_cable *cable)
+{
+ put_device(&cable->dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_put);
+
+/**
+ * typec_cable_is_active - Check whether the USB Type-C cable is active or passive
+ * @cable: The USB Type-C Cable
+ *
+ * Return 1 if the cable is active or 0 if it's passive.
+ */
+int typec_cable_is_active(struct typec_cable *cable)
+{
+ return cable->active;
+}
+EXPORT_SYMBOL_GPL(typec_cable_is_active);
+
/**
* typec_cable_set_identity - Report result from Discover Identity command
* @cable: The cable updated identity values
@@ -1483,7 +1529,11 @@ EXPORT_SYMBOL_GPL(typec_get_orientation);
*/
int typec_set_mode(struct typec_port *port, int mode)
{
- return port->mux ? port->mux->set(port->mux, mode) : 0;
+ struct typec_mux_state state = { };
+
+ state.mode = mode;
+
+ return port->mux ? port->mux->set(port->mux, &state) : 0;
}
EXPORT_SYMBOL_GPL(typec_set_mode);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 57907f26f681..5baf0f416c73 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* USB Type-C Multiplexer/DeMultiplexer Switch support
*
* Copyright (C) 2018 Intel Corporation
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 5585b109095b..46457c133d2b 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -73,7 +73,8 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
return ret;
}
-static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
+static int
+pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
@@ -82,7 +83,7 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
mutex_lock(&pi->lock);
new_conf = pi->conf;
- switch (state) {
+ switch (state->mode) {
case TYPEC_STATE_SAFE:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_OPEN;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index ed8655c6af8c..b498960ff72b 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1666,7 +1666,7 @@ static const struct property_entry port_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 2500),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
{ }
};
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 8b4ff9fff340..753645bb2527 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -591,6 +591,12 @@ static int tcpci_probe(struct i2c_client *client,
static int tcpci_remove(struct i2c_client *client)
{
struct tcpci_chip *chip = i2c_get_clientdata(client);
+ int err;
+
+ /* Disable chip interrupts before unregistering port */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
+ if (err < 0)
+ return err;
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 56fc356bc55c..f3087ef8265c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1475,16 +1475,16 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
return 0;
}
-static int tcpm_altmode_enter(struct typec_altmode *altmode)
+static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
u32 header;
mutex_lock(&port->lock);
- header = VDO(altmode->svid, 1, CMD_ENTER_MODE);
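+ /* Queue the optional VDO as a single extra VDM data object. */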
+ header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm(port, header, NULL, 0);
+ tcpm_queue_vdm(port, header, vdo, vdo ? 1 : 0);
mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index edc271da14f4..9b745f432c91 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -597,7 +597,7 @@ static const struct property_entry wcove_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000),
{ }
};
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d4d5189edfb8..0f1273ae086c 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -45,7 +45,7 @@ struct ucsi_dp {
* -EOPNOTSUPP.
*/
-static int ucsi_displayport_enter(struct typec_altmode *alt)
+static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
struct ucsi *ucsi = dp->con->ucsi;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 4459bc68aa33..d5a6aac86327 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -189,7 +189,7 @@ int ucsi_resume(struct ucsi *ucsi)
u64 command;
/* Restore UCSI notification enable mask after system resume */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
return ucsi_send_command(ucsi, command, NULL, 0);
}
@@ -318,6 +318,82 @@ err:
return ret;
}
+static int
+ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
+{
+ int max_altmodes = UCSI_MAX_ALTMODES;
+ struct typec_altmode_desc desc;
+ struct ucsi_altmode alt;
+ struct ucsi_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_altmode updated[UCSI_MAX_ALTMODES];
+ struct ucsi *ucsi = con->ucsi;
+ bool multi_dp = false;
+ u64 command;
+ int ret;
+ int len;
+ int i;
+ int k = 0;
+
+ if (recipient == UCSI_RECIPIENT_CON)
+ max_altmodes = con->ucsi->cap.num_alt_modes;
+
+ memset(orig, 0, sizeof(orig));
+ memset(updated, 0, sizeof(updated));
+
+ /* First get all the alternate modes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&alt, 0, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+ /*
+ * We are collecting all altmodes first and then registering.
+ * Some type-C devices will return zero-length data beyond the
+ * last alternate mode, so we should not return if the length is zero.
+ */
+ if (len < 0)
+ return len;
+
+ /* We got all altmodes, now break out and register them */
+ if (!len || !alt.svid)
+ break;
+
+ orig[k].mid = alt.mid;
+ orig[k].svid = alt.svid;
+ k++;
+ }
+ /*
+ * Update the original altmode table as some PPMs may report
+ * multiple DP altmodes.
+ */
+ if (recipient == UCSI_RECIPIENT_CON)
+ multi_dp = ucsi->ops->update_altmodes(ucsi, orig, updated);
+
+ /* now register altmodes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&desc, 0, sizeof(desc));
+ if (multi_dp && recipient == UCSI_RECIPIENT_CON) {
+ desc.svid = updated[i].svid;
+ desc.vdo = updated[i].mid;
+ } else {
+ desc.svid = orig[i].svid;
+ desc.vdo = orig[i].mid;
+ }
+ desc.roles = TYPEC_PORT_DRD;
+
+ if (!desc.svid)
+ return 0;
+
+ ret = ucsi_register_altmode(con, &desc, recipient);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
{
int max_altmodes = UCSI_MAX_ALTMODES;
@@ -336,6 +412,9 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
return 0;
+ if (con->ucsi->ops->update_altmodes)
+ return ucsi_register_altmodes_nvidia(con, recipient);
+
if (recipient == UCSI_RECIPIENT_CON)
max_altmodes = con->ucsi->cap.num_alt_modes;
@@ -589,6 +668,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
struct ucsi_connector *con = &ucsi->connector[num - 1];
+ if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+ dev_dbg(ucsi->dev, "Bogus connector change event\n");
+ return;
+ }
+
if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
schedule_work(&con->work);
}
@@ -656,7 +740,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
@@ -890,8 +974,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable basic notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE;
- command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
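+ /* Cache the enabled notifications so they can be restored on resume. */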
+ ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
@@ -923,7 +1007,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable all notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 831c9470bdc1..e434b9c9a9eb 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -11,6 +11,7 @@
/* -------------------------------------------------------------------------- */
struct ucsi;
+struct ucsi_altmode;
/* UCSI offsets (Bytes) */
#define UCSI_VERSION 0
@@ -35,6 +36,7 @@ struct ucsi;
* @read: Read operation
* @sync_write: Blocking write operation
* @async_write: Non-blocking write operation
+ * @update_altmodes: Squashes duplicate DP altmodes
*
* Read and write routines for UCSI interface. @sync_write must wait for the
* Command Completion Event from the PPM before returning, and @async_write must
@@ -47,6 +49,8 @@ struct ucsi_operations {
const void *val, size_t val_len);
int (*async_write)(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len);
+ bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated);
};
struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
@@ -82,6 +86,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_ERROR_STATUS 0x13
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
/* CONNECTOR_RESET command bits */
#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
@@ -140,6 +145,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
#define UCSI_ERROR_SWAP_REJECTED BIT(12)
+#define UCSI_SET_NEW_CAM_ENTER(x) (((x) >> 23) & 0x1)
+#define UCSI_SET_NEW_CAM_GET_AM(x) (((x) >> 24) & 0xff)
+#define UCSI_SET_NEW_CAM_AM_MASK (0xff << 24)
+#define UCSI_SET_NEW_CAM_SET_AM(x) (((x) & 0xff) << 24)
+#define UCSI_CMD_CONNECTOR_MASK (0x7)
+
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
u32 attributes;
@@ -269,6 +280,9 @@ struct ucsi {
/* PPM Communication lock */
struct mutex ppm_lock;
+ /* The latest "Notification Enable" bits (SET_NOTIFICATION_ENABLE) */
+ u64 ntfy;
+
/* PPM communication flags */
unsigned long flags;
#define EVENT_PENDING 0
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 3f1786170098..9fc4f338e870 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* This will make sure we can use ioremap_nocache() */
+ /* This will make sure we can use ioremap() */
status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
if (ACPI_FAILURE(status))
return -ENOMEM;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 3370b3fc37b1..a5b8530490db 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/usb/typec_dp.h>
#include <asm/unaligned.h>
#include "ucsi.h"
@@ -173,6 +174,15 @@ struct ccg_resp {
u8 length;
};
+struct ucsi_ccg_altmode {
+ u16 svid;
+ u32 mid;
+ u8 linked_idx;
+ u8 active_idx;
+#define UCSI_MULTI_DP_INDEX (0xff)
+ bool checked;
+} __packed;
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -198,6 +208,11 @@ struct ucsi_ccg {
struct work_struct pm_work;
struct completion complete;
+
+ u64 last_cmd_sent;
+ bool has_multiple_dp;
+ struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -319,12 +334,169 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
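+/* Map the CAM index reported by the PPM onto the merged altmode table. */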
+static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
+{
+ u8 cam, new_cam;
+
+ cam = data[0];
+ new_cam = uc->orig[cam].linked_idx;
+ uc->updated[new_cam].active_idx = cam;
+ data[0] = new_cam;
+}
+
+static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
+ struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated)
+{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_ccg_altmode *alt, *new_alt;
+ int i, j, k = 0;
+ bool found = false;
+
+ alt = uc->orig;
+ new_alt = uc->updated;
+ memset(uc->updated, 0, sizeof(uc->updated));
+
+ /*
+ * Copy the original connector altmodes to the new structure.
+ * We need this before the second loop, since that loop
+ * checks for duplicate altmodes.
+ */
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ alt[i].svid = orig[i].svid;
+ alt[i].mid = orig[i].mid;
+ if (!alt[i].svid)
+ break;
+ }
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ if (!alt[i].svid)
+ break;
+
+ /* already checked and considered */
+ if (alt[i].checked)
+ continue;
+
+ if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
+ /* Found Non DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ continue;
+ }
+
+ for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
+ if (alt[i].svid != alt[j].svid ||
+ !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
+ continue;
+ } else {
+ /* Found duplicate DP mode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid | alt[j].mid;
+ new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
+ alt[i].linked_idx = k;
+ alt[j].linked_idx = k;
+ alt[j].checked = true;
+ found = true;
+ }
+ }
+ if (found) {
+ uc->has_multiple_dp = true;
+ } else {
+ /* Didn't find any duplicate DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ }
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ }
+ return found;
+}
+
+static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
+ struct ucsi_connector *con,
+ u64 *cmd)
+{
+ struct ucsi_ccg_altmode *new_port, *port;
+ struct typec_altmode *alt = NULL;
+ u8 new_cam, cam, pin;
+ bool enter_new_mode;
+ int i, j, k = 0xff;
+
+ port = uc->orig;
+ new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
+ new_port = &uc->updated[new_cam];
+ cam = new_port->linked_idx;
+ enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
+
+ /*
+ * If CAM is UCSI_MULTI_DP_INDEX then this is a DP altmode
+ * with multiple DP modes. Find the CAM with the best pin
+ * assignment among all DP modes. Prioritize pins E->D->C after
+ * making sure the partner supports that pin.
+ */
+ if (cam == UCSI_MULTI_DP_INDEX) {
+ if (enter_new_mode) {
+ for (i = 0; con->partner_altmode[i]; i++) {
+ alt = con->partner_altmode[i];
+ if (alt->svid == new_port->svid)
+ break;
+ }
+ /*
+ * alt will always be non-NULL since this is a
+ * UCSI_SET_NEW_CAM command, so there will be
+ * at least one con->partner_altmode[i] whose svid
+ * matches new_port->svid.
+ */
+ for (j = 0; port[j].svid; j++) {
+ pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
+ if (alt && port[j].svid == alt->svid &&
+ (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
+ /* prioritize pin E->D->C */
+ if (k == 0xff || (k != 0xff && pin >
+ DP_CONF_GET_PIN_ASSIGN(port[k].mid))
+ ) {
+ k = j;
+ }
+ }
+ }
+ cam = k;
+ new_port->active_idx = cam;
+ } else {
+ cam = new_port->active_idx;
+ }
+ }
+ *cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
+ *cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
+}
+
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ ret = ccg_read(uc, reg, val, val_len);
+ if (ret)
+ return ret;
+
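+ /* Fix up GET_CURRENT_CAM responses when duplicate DP modes were merged. */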
+ if (offset == UCSI_MESSAGE_IN) {
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
+ uc->has_multiple_dp) {
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
+ }
+ uc->last_cmd_sent = 0;
+ }
+
+ return ret;
}
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
@@ -339,12 +511,26 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector *con;
+ int con_index;
int ret;
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
set_bit(DEV_CMD_PENDING, &uc->flags);
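+ /* Snoop the command so CAM indices can be remapped for merged DP modes. */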
+ if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
+ uc->last_cmd_sent = *(u64 *)val;
+
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ uc->has_multiple_dp) {
+ con_index = (uc->last_cmd_sent >> 16) &
+ UCSI_CMD_CONNECTOR_MASK;
+ con = &uc->ucsi->connector[con_index - 1];
+ ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
+ }
+ }
+
ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
if (ret)
goto err_clear_bit;
@@ -363,7 +549,8 @@ err_clear_bit:
static const struct ucsi_operations ucsi_ccg_ops = {
.read = ucsi_ccg_read,
.sync_write = ucsi_ccg_sync_write,
- .async_write = ucsi_ccg_async_write
+ .async_write = ucsi_ccg_async_write,
+ .update_altmodes = ucsi_ccg_update_altmodes
};
static irqreturn_t ccg_irq_handler(int irq, void *data)
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 0120d8324a40..a87992892a9f 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
- iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
off = pos - 0xa0000;
rsrc = VGA_RSRC_LEGACY_MEM;
is_ioport = false;
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 2d2babe21b2f..40d4fb9276ba 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
if (!xgmac_regs->ioaddr) {
xgmac_regs->ioaddr =
- ioremap_nocache(xgmac_regs->addr, xgmac_regs->size);
+ ioremap(xgmac_regs->addr, xgmac_regs->size);
if (!xgmac_regs->ioaddr)
return -ENOMEM;
}
if (!xpcs_regs->ioaddr) {
xpcs_regs->ioaddr =
- ioremap_nocache(xpcs_regs->addr, xpcs_regs->size);
+ ioremap(xpcs_regs->addr, xpcs_regs->size);
if (!xpcs_regs->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index 16165a62b86d..96064ef8f629 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
/* Map FlexRM ring registers if not mapped */
if (!reg->ioaddr) {
- reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+ reg->ioaddr = ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index f67bab547501..09a9453b75c5 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e8f2bdbe0542..c0771a9567fb 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
@@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 9f3be0258623..27ba2ed4138a 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ret = -EBUSY;
goto err_free_hw;
}
- hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
+ hw->v_regs = ioremap(carminefb_fix.mmio_start,
carminefb_fix.mmio_len);
if (!hw->v_regs) {
printk(KERN_ERR "carminefb: Can't remap %s register.\n",
@@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_unmap_vregs;
}
- hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
+ hw->screen_mem = ioremap(carminefb_fix.smem_start,
carminefb_fix.smem_len);
if (!hw->screen_mem) {
printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index d18f7b31932c..aa7583d963ac 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
}
par->res_flags |= MMIO_REQ;
- par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys,
+ par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
printk("i810fb_init: cannot remap mmio region\n");
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index a76c61512c60..a09fc2eaa40d 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
}
dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ (u8 __iomem *)ioremap(dinfo->mmio_base_phys,
INTEL_REG_SIZE);
if (!dinfo->mmio_base) {
ERR_MSG("Cannot remap MMIO region.\n");
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index a7bd9f25911b..a8660926924b 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
kyro_fix.mmio_len = pci_resource_len(pdev, 1);
currentpar->regbase = deviceInfo.pSTGReg =
- ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+ ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len);
if (!currentpar->regbase)
goto out_free_fb;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 1a555f70923a..36cc718b96ae 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
memsize = mem;
err = -ENOMEM;
- minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384);
+ minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384);
if (!minfo->mmio.vbase.vaddr) {
printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys);
goto failVideoMR;
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 50935252b50b..3de4b3ed990a 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
mfbi->reg_phys_addr = mfbi->reg_res->start;
- mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev,
+ mfbi->reg_virt_addr = devm_ioremap(&dev->dev,
mfbi->reg_phys_addr,
res_size(mfbi->reg_req));
if (!mfbi->reg_virt_addr) {
@@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
virt_base_2700 = mfbi->reg_virt_addr;
- mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr,
+ mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 17174cd7a5bb..974e4c28b08b 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -485,7 +485,7 @@ static int mmphw_probe(struct platform_device *pdev)
goto failed;
}
- ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+ ctrl->reg_base = devm_ioremap(ctrl->dev,
res->start, resource_size(res));
if (ctrl->reg_base == NULL) {
dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 1dcf02e12af4..7cc1216b1389 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_exit_neither;
}
default_par->v_regs =
- ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+ ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
if (!default_par->v_regs) {
printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n",
pm2fb_fix.id);
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 6130aa56a1e9..2fa46607e0fc 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par)
return 0;
}
screen_mem =
- ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
+ ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
if (!screen_mem) {
printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
@@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_exit_neither;
}
par->v_regs =
- ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
+ ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
if (!par->v_regs) {
printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n",
pm3fb_fix.id);
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index d1e78ce3a9c2..d5bf185fc376 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 56b912bb28de..2ddcdf7919a2 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_BA_FBMEM;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index 2822b2225924..90d2b04feb42 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAGB_B_FBMEM;
- par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
+ par->smem = ioremap(info->fix.smem_start, info->fix.smem_len);
if (!par->smem) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
err = -ENOMEM;
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 0a3b2b7c7891..c680b3e651cb 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void)
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
- fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
+ fb_info->screen_base = ioremap(pvr2_fix.smem_start,
pvr2_fix.smem_len);
if (!fb_info->screen_base) {
@@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void)
goto out_err;
}
- par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start,
+ par->mmio_base = ioremap(pvr2_fix.mmio_start,
pvr2_fix.mmio_len);
if (!par->mmio_base) {
printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 1410f476e135..5615054a0cad 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* Map LCD controller registers.
*/
- fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ fbi->reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (fbi->reg_base == NULL) {
ret = -ENOMEM;
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index e04efb567b5c..8048499e398d 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -809,7 +809,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
- default_par->regs = ioremap_nocache(pdev->resource[1].start,
+ default_par->regs = ioremap(pdev->resource[1].start,
pdev->resource[1].end - pdev->resource[1].start +1);
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
@@ -818,7 +818,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
info->pseudo_palette = default_par->pseudo_palette;
- info->screen_base = ioremap_nocache(pdev->resource[0].start,
+ info->screen_base = ioremap(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start +1);
if (!info->screen_base) {
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index ab8fe838c776..f72b03594719 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
goto out_fb;
}
- par->base = ioremap_nocache(res->start, resource_size(res));
+ par->base = ioremap(res->start, resource_size(res));
if (!par->base) {
dev_err(&pdev->dev, "cannot remap\n");
ret = -ENODEV;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index c249763dbf0b..54ee7e02a244 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
if (num_channels == 2)
priv->forced_fourcc = pdata->ch[0].fourcc;
- priv->base = ioremap_nocache(res->start, resource_size(res));
+ priv->base = ioremap(res->start, resource_size(res));
if (!priv->base) {
error = -ENOMEM;
goto err1;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 4e22ae383c87..1f171a527174 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_fb_mem;
}
- par->mmio_vbase = ioremap_nocache(fix->mmio_start,
+ par->mmio_vbase = ioremap(fix->mmio_start,
fix->mmio_len);
if (!par->mmio_vbase) {
printk(KERN_ERR "sstfb: cannot remap register area %#lx\n",
fix->mmio_start);
goto fail_mmio_remap;
}
- info->screen_base = ioremap_nocache(fix->smem_start, 0x400000);
+ info->screen_base = ioremap(fix->smem_start, 0x400000);
if (!info->screen_base) {
printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n",
fix->smem_start);
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 9e88e3f594c2..46709443a82f 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */
/* FIXME: TomCat supports two heads:
* fb.iobase = REGION_BASE(fb_info,3);
- * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx);
+ * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx);
* for now we only support the left one ! */
xres = fb->ngle_rom.x_size_visible;
yres = fb->ngle_rom.y_size_visible;
@@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
strcpy(fix->id, "stifb");
info->fbops = &stifb_ops;
- info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
+ info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len);
if (!info->screen_base) {
printk(KERN_ERR "stifb: failed to map memory\n");
goto out_err0;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index fdbb1ea66e6c..0337d1a1a70b 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
default_par->regbase_virt =
- ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!default_par->regbase_virt) {
printk(KERN_ERR "fb: Can't remap %s register area.\n",
info->fix.id);
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 286b2371c7dd..1966f1d70899 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev)
}
/* Map the framebuffer. */
- mem_base = ioremap_nocache(bar0_start, bar0_len);
+ mem_base = ioremap(bar0_start, bar0_len);
if (!mem_base) {
printk(KERN_ERR "tgafb: Cannot map MMIO\n");
goto err1;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index da74bf6c5996..91b2f6ca2607 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev,
return -1;
}
- default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
+ default_par->io_virt = ioremap(tridentfb_fix.mmio_start,
tridentfb_fix.mmio_len);
if (!default_par->io_virt) {
@@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev,
goto out_unmap1;
}
- info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
+ info->screen_base = ioremap(tridentfb_fix.smem_start,
tridentfb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index e04fde9c1fcd..97a59b5a4570 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -356,7 +356,7 @@ int __init valkyriefb_init(void)
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
#ifdef CONFIG_MAC
- p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+ p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram);
#else
p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
#endif
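
Note the asymmetry in the valkyriefb hunk: only the CONFIG_MAC branch is renamed, while the other branch keeps ioremap_wt(), which requests a write-through mapping where the architecture supports one and is expected to fall back to an uncached mapping elsewhere. A hedged sketch of the distinction (map_fb() and its strict flag are invented for illustration):

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *map_fb(phys_addr_t phys, size_t size, bool strict)
{
	if (strict)
		return ioremap(phys, size);	/* always uncached */

	/* write-through: stores reach VRAM promptly, loads may be
	 * served from cache; uncached fallback where unsupported */
	return ioremap_wt(phys, size);
}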
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
index c1e3738e6789..79d42b23d850 100644
--- a/drivers/video/fbdev/vermilion/cr_pll.c
+++ b/drivers/video/fbdev/vermilion/cr_pll.c
@@ -159,7 +159,7 @@ static int __init cr_pll_init(void)
pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR,
&mch_bar);
mch_regs_base =
- ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE);
+ ioremap(mch_bar, CRVML_MCHMAP_SIZE);
if (!mch_regs_base) {
printk(KERN_ERR
"Carillo Ranch MCH device was not enabled.\n");
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 498038a964ee..ff61605b8764 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
": Could not claim display controller MMIO.\n");
return -EBUSY;
}
- par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
+ par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
if (par->vdc_mem == NULL) {
printk(KERN_ERR MODULE_NAME
": Could not map display controller MMIO.\n");
@@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
err = -EBUSY;
goto out_err_1;
}
- par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
+ par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
if (par->gpu_mem == NULL) {
printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
err = -ENOMEM;
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index ffa2ca2d3f5e..703ddee9a244 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev)
*/
vdev->engine_start = pci_resource_start(vdev->pdev, 1);
vdev->engine_len = pci_resource_len(vdev->pdev, 1);
- vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_mmio = ioremap(vdev->engine_start,
vdev->engine_len);
if (vdev->engine_mmio == NULL)
dev_err(&vdev->pdev->dev,
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 3be07807edcd..0796b1d90981 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev)
return -EINVAL;
/* Remap the chip base address */
- remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
+ remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN);
if (remapped_base == NULL)
goto out;
/* Map the register space */
- remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
+ remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN);
if (remapped_regs == NULL)
goto out;
@@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev)
printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
/* Remap the framebuffer */
- remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
+ remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
if (remapped_fbuf == NULL)
goto out;
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 2307b0329aec..d823d558c0c4 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 43c391626a00..50920b6fc319 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -7,6 +7,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
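
The two vboxguest hunks make no functional change; they add an explicit #include <linux/io.h> so these files state their dependency on the I/O accessor declarations directly instead of inheriting it through other headers. Illustrative fragment only (ack_events() and its single-register layout are invented):

#include <linux/io.h>		/* declares writel(), readl(), ioremap(), ... */

static void ack_events(void __iomem *mmio)
{
	writel(1, mmio);	/* writel() comes from <linux/io.h> */
}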
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index 1b6e42e5e8fd..51e056bae943 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Map registers in BAR 0 */
- vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
+ vmic_base = ioremap(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 1edb8a5de873..ea938dc29c5e 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
@@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!ca91cx42_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 7e079d39bd76..50ae26977a02 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
@@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
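
Both VME bridge hunks sit inside the same pattern: the driver first carves a master window out of the parent iomem space with allocate_resource(), then maps what it was granted. A rough sketch of that two-step shape (map_window() is invented; the alignment and limit arguments are placeholders, not the bridges' real policy):

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *map_window(struct resource *res, resource_size_t size)
{
	if (allocate_resource(&iomem_resource, res, size,
			      0, (resource_size_t)~0ULL,
			      4096, NULL, NULL))
		return NULL;			/* no window space left */

	/* on NULL the caller still owns res and must release_resource() it */
	return ioremap(res->start, size);
}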
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 3110791a2f1c..ee716c715710 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
dev->phys_addr = pci_resource_start(pdev, 1);
- dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
+ dev->virt_addr = ioremap(dev->phys_addr, 16384);
if (!dev->virt_addr) {
dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
__func__, dev->phys_addr, 16384);
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8a043b52aa2f..7cdb25363ea0 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start,
+ bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
if (!bcm63xx_wdt_device.regs) {
dev_err(&pdev->dev, "failed to remap I/O resources\n");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 6ad5bf3451ec..804e35940983 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void)
return -ENODEV;
}
- tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr,
20);
if (tmp_addr == NULL) {
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 1dfede0abf18..aee3c2efd565 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-#include <linux/io.h> /* For devm_ioremap_nocache */
+#include <linux/io.h> /* For devm_ioremap */
#include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */
@@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
+ wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!wdt_reg) {
pr_err("failed to remap I/O resources\n");
return -ENXIO;
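
Both watchdog hunks use the managed variant: devm_ioremap_nocache() becomes devm_ioremap(), and the mapping stays tied to the device lifetime, which is why neither error path gains an iounmap(). A minimal probe sketch under those assumptions (example_probe() and the register write are hypothetical):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *regs;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!regs)
		return -ENOMEM;		/* devm: nothing to unwind here */

	writel(0, regs);		/* hypothetical: quiesce the block */
	return 0;			/* mapping is released with the device */
}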
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 8b9919c26095..70650b248de5 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <xen/xen-ops.h>
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
/*
* Some hypercalls issued by the toolstack can take many 10s of
@@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
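
The final hunk is a different rename riding along with the series: both CONFIG_PREEMPT and CONFIG_PREEMPT_RT select CONFIG_PREEMPTION, so the latter is the correct "is this kernel preemptible?" test. Under an RT kernel the old #ifndef CONFIG_PREEMPT guard would have built Xen's manual preemption fixup into a kernel that is already fully preemptible. A sketch of the idiom (maybe_yield() is an invented helper):

#include <linux/sched.h>

#ifdef CONFIG_PREEMPTION
static inline void maybe_yield(void) { }	/* scheduler preempts us anyway */
#else
static inline void maybe_yield(void)
{
	cond_resched();				/* explicit preemption point */
}
#endif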